[
  {
    "path": ".github/workflows/lint.yml",
    "content": "name: lint\n\non: [push, pull_request]\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\njobs:\n  lint:\n    runs-on: ubuntu-latest\n    steps:\n      - uses: actions/checkout@v2\n      - name: Set up Python 3.7\n        uses: actions/setup-python@v2\n        with:\n          python-version: 3.7\n      - name: Install pre-commit hook\n        run: |\n          pip install pre-commit\n          pre-commit install\n      - name: Linting\n        run: pre-commit run --all-files\n      - name: Format c/cuda codes with clang-format\n        uses: DoozyX/clang-format-lint-action@v0.11\n        with:\n          source: bevdepth/ops\n          extensions: h,c,cpp,hpp,cu,cuh\n          style: google\n      - name: Check docstring coverage\n        run: |\n          pip install interrogate\n          interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex \"__repr__\" -e 'bevdepth/exps' -e 'test/' -e 'scripts' -e 'setup.py' -e 'bevdepth/ops' -e 'bevdepth/utils/' --fail-under 50\n"
  },
  {
    "path": ".gitignore",
    "content": "### Linux ###\n*~\n\n# temporary files which can be created if a process still has a handle open of a deleted file\n.fuse_hidden*\n\n# KDE directory preferences\n.directory\n\n# Linux trash folder which might appear on any partition or disk\n.Trash-*\n\n# .nfs files are created when an open file is removed but is still being accessed\n.nfs*\n\n### PyCharm ###\n# User-specific stuff\n.idea\n\n# CMake\ncmake-build-*/\n\n# Mongo Explorer plugin\n.idea/**/mongoSettings.xml\n\n# File-based project format\n*.iws\n\n# IntelliJ\nout/\n\n# mpeltonen/sbt-idea plugin\n.idea_modules/\n\n# JIRA plugin\natlassian-ide-plugin.xml\n\n# Cursive Clojure plugin\n.idea/replstate.xml\n\n# Crashlytics plugin (for Android Studio and IntelliJ)\ncom_crashlytics_export_strings.xml\ncrashlytics.properties\ncrashlytics-build.properties\nfabric.properties\n\n# Editor-based Rest Client\n.idea/httpRequests\n\n# Android studio 3.1+ serialized cache file\n.idea/caches/build_file_checksums.ser\n\n# JetBrains templates\n**___jb_tmp___\n\n### Python ###\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\ndocs/build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n.python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don’t work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n### Vim ###\n# Swap\n[._]*.s[a-v][a-z]\n[._]*.sw[a-p]\n[._]s[a-rt-v][a-z]\n[._]ss[a-gi-z]\n[._]sw[a-p]\n\n# Session\nSession.vim\n\n# Temporary\n.netrwhist\n# Auto-generated tag files\ntags\n# Persistent undo\n[._]*.un~\n\n### Researcher ###\n# output\ntrain_log\ndocs/api\n.code-workspace.code-workspace\noutput\noutputs\ninstant_test_output\ninference_test_output\n*.pkl\n*.npy\n*.pth\nevents.out.tfevents*\n\n# vscode\n*.code-workspace\n.vscode\n\n# vim\n.vim\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "repos:\n  - repo: https://github.com/PyCQA/flake8\n    rev: 5.0.4\n    hooks:\n      - id: flake8\n  - repo: https://github.com/PyCQA/isort\n    rev: 5.10.1\n    hooks:\n      - id: isort\n  - repo: https://github.com/pre-commit/mirrors-yapf\n    rev: v0.32.0\n    hooks:\n      - id: yapf\n  - repo: https://github.com/pre-commit/pre-commit-hooks\n    rev: v4.3.0\n    hooks:\n      - id: trailing-whitespace\n      - id: check-yaml\n      - id: end-of-file-fixer\n      - id: requirements-txt-fixer\n      - id: double-quote-string-fixer\n      - id: check-merge-conflict\n      - id: fix-encoding-pragma\n        args: [\"--remove\"]\n      - id: mixed-line-ending\n        args: [\"--fix=lf\"]\n  - repo: https://github.com/codespell-project/codespell\n    rev: v2.2.1\n    hooks:\n      - id: codespell\n"
  },
  {
    "path": "LICENSE.md",
    "content": "MIT License\n\nCopyright (c) 2022 Megvii-BaseDetection\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "## BEVDepth\nBEVDepth is a new 3D object detector with a trustworthy depth\nestimation. For more details, please refer to our [paper on Arxiv](https://arxiv.org/abs/2206.10092).\n\n<img src=\"assets/bevdepth.png\" width=\"1000\" >\n\n## BEVStereo\nBEVStereo is a new multi-view 3D object detector using temporal stereo to enhance depth estimation.\n<img src=\"assets/bevstereo.png\" width=\"1000\" >\n\n## MatrixVT\n[MatrixVT](bevdepth/exps/nuscenes/MatrixVT/matrixvt_bev_depth_lss_r50_256x704_128x128_24e_ema.py) is a novel View Transformer for BEV paradigm with high efficiency and without customized operators. For more details, please refer to our [paper on Arxiv](https://arxiv.org/abs/2211.10593). Try MatrixVT on **CPU** by run [this file](bevdepth/layers/backbones/matrixvt.py) !\n<img src=\"assets/matrixvt.jpg\" width=\"1000\" >\n\n## Updates!!\n* 【2022/12/06】 We released our new View Transformer (MatrixVT), the paper is on [Arxiv](https://arxiv.org/abs/2211.10593).\n* 【2022/11/30】 We updated our paper(BEVDepth) on [Arxiv](https://arxiv.org/abs/2206.10092).\n* 【2022/11/18】 Both BEVDepth and BEVStereo were accepted by AAAI'2023.\n* 【2022/09/22】 We released our paper(BEVStereo) on [Arxiv](https://arxiv.org/abs/2209.10248).\n* 【2022/08/24】 We submitted our result(BEVStereo) on [nuScenes Detection Task](https://nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Camera) and achieved the SOTA.\n* 【2022/06/23】 We submitted our result(BEVDepth) without extra data on [nuScenes Detection Task](https://nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Camera) and achieved the SOTA.\n* 【2022/06/21】 We released our paper(BEVDepth) on [Arxiv](https://arxiv.org/abs/2206.10092).\n* 【2022/04/11】 We submitted our result(BEVDepth) on [nuScenes Detection Task](https://nuscenes.org/object-detection?externalData=all&mapData=all&modalities=Camera) and achieved the SOTA.\n\n\n## Quick Start\n### Installation\n**Step 0.** Install [pytorch](https://pytorch.org/)(v1.9.0).\n\n**Step 1.** Install [MMDetection3D](https://github.com/open-mmlab/mmdetection3d)(v1.0.0rc4).\n\n**Step 2.** Install requirements.\n```shell\npip install -r requirements.txt\n```\n**Step 3.** Install BEVDepth(gpu required).\n```shell\npython setup.py develop\n```\n\n### Data preparation\n**Step 0.** Download nuScenes official dataset.\n\n**Step 1.** Symlink the dataset root to `./data/`.\n```\nln -s [nuscenes root] ./data/\n```\nThe directory will be as follows.\n```\nBEVDepth\n├── data\n│   ├── nuScenes\n│   │   ├── maps\n│   │   ├── samples\n│   │   ├── sweeps\n│   │   ├── v1.0-test\n|   |   ├── v1.0-trainval\n```\n**Step 2.** Prepare infos.\n```\npython scripts/gen_info.py\n```\n\n### Tutorials\n**Train.**\n```\npython [EXP_PATH] --amp_backend native -b 8 --gpus 8\n```\n**Eval.**\n```\npython [EXP_PATH] --ckpt_path [CKPT_PATH] -e -b 8 --gpus 8\n```\n\n### Benchmark\n|Exp |EMA| CBGS |mAP |mATE| mASE | mAOE |mAVE| mAAE | NDS | weights |\n| ------ | :---: | :---: | :---:       |:---:     |:---:  | :---: | :----: | :----: | :----: | :----: |\n|[BEVDepth](bevdepth/exps/nuscenes/mv/bev_depth_lss_r50_256x704_128x128_24e_2key.py)| | |0.3304| 0.7021| 0.2795| 0.5346| 0.5530| 0.2274| 0.4355 | [github](https://github.com/Megvii-BaseDetection/BEVDepth/releases/download/v0.0.2/bev_depth_lss_r50_256x704_128x128_24e_2key.pth)\n|[BEVDepth](bevdepth/exps/nuscenes/mv/bev_depth_lss_r50_256x704_128x128_24e_2key_ema.py)|√ | |0.3329 |  0.6832     |0.2761 | 0.5446 | 0.5258 | 0.2259 | 0.4409 | 
[github](https://github.com/Megvii-BaseDetection/BEVDepth/releases/download/v0.0.2/bev_depth_lss_r50_256x704_128x128_24e_2key_ema.pth)\n|[BEVDepth](bevdepth/exps/nuscenes/mv/bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da.py)| |√ |0.3484| 0.6159| 0.2716| 0.4144| 0.4402| 0.1954| 0.4805 | [github](https://github.com/Megvii-BaseDetection/BEVDepth/releases/download/v0.0.2/bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da.pth)\n|[BEVDepth](bevdepth/exps/nuscenes/mv/bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da_ema.py)|√  |√ |0.3589 |  0.6119     |0.2692 | 0.5074 | 0.4086 | 0.2009 | 0.4797 | [github](https://github.com/Megvii-BaseDetection/BEVDepth/releases/download/v0.0.2/bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da_ema.pth) |\n|[BEVStereo](bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_24e_2key.py)|  | |0.3456 | 0.6589 | 0.2774 | 0.5500 | 0.4980 | 0.2278 | 0.4516 | [github](https://github.com/Megvii-BaseDetection/BEVStereo/releases/download/v0.0.2/bev_stereo_lss_r50_256x704_128x128_24e_2key.pth) |\n|[BEVStereo](bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_24e_2key_ema.py)|√  | |0.3494|\t0.6671|\t0.2785|\t0.5606|\t0.4686|\t0.2295|\t0.4543 | [github](https://github.com/Megvii-BaseDetection/BEVStereo/releases/download/v0.0.2/bev_stereo_lss_r50_256x704_128x128_24e_2key_ema.pth) |\n|[BEVStereo](bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_24e_key4.py)|  | |0.3427|\t0.6560|\t0.2784|\t0.5982|\t0.5347|\t0.2228|\t0.4423 | [github](https://github.com/Megvii-BaseDetection/BEVStereo/releases/download/v0.0.2/bev_stereo_lss_r50_256x704_128x128_24e_key4.pth) |\n|[BEVStereo](bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_24e_key4_ema.py)|√  | |0.3435|\t0.6585|\t0.2757|\t0.5792|\t0.5034|\t0.2163|\t0.4485 | [github](https://github.com/Megvii-BaseDetection/BEVStereo/releases/download/v0.0.2/bev_stereo_lss_r50_256x704_128x128_24e_key4_ema.pth) |\n|[BEVStereo](bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_20e_cbgs_2key_da.py)|  |√ |0.3576|\t0.6071|\t0.2684|\t0.4157|\t0.3928|\t0.2021|\t0.4902 | [github](https://github.com/Megvii-BaseDetection/BEVStereo/releases/download/v0.0.2/bev_stereo_lss_r50_256x704_128x128_20e_cbgs_2key_da.pth) |\n|[BEVStereo](bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_20e_cbgs_2key_da_ema.py)|√  |√ |0.3721|\t0.5980|\t0.2701|\t0.4381|\t0.3672|\t0.1898|\t0.4997 | [github](https://github.com/Megvii-BaseDetection/BEVStereo/releases/download/v0.0.2/bev_stereo_lss_r50_256x704_128x128_20e_cbgs_2key_da_ema.pth) |\n\n## FAQ\n\n### EMA\n- The results are different between evaluation during training and evaluation from ckpt.\n\nDue to the working mechanism of EMA, the model parameters saved by ckpt are different from the model parameters used in the training stage.\n\n- EMA exps are unable to resume training from ckpt.\n\nWe used the customized EMA callback and this function is not supported for now.\n\n## Cite BEVDepth & BEVStereo & MatrixVT\nIf you use BEVDepth and BEVStereo in your research, please cite our work by using the following BibTeX entry:\n\n```latex\n @article{li2022bevdepth,\n  title={BEVDepth: Acquisition of Reliable Depth for Multi-view 3D Object Detection},\n  author={Li, Yinhao and Ge, Zheng and Yu, Guanyi and Yang, Jinrong and Wang, Zengran and Shi, Yukang and Sun, Jianjian and Li, Zeming},\n  journal={arXiv preprint arXiv:2206.10092},\n  year={2022}\n}\n@article{li2022bevstereo,\n  title={Bevstereo: Enhancing depth estimation in multi-view 3d object detection with dynamic 
temporal stereo},\n  author={Li, Yinhao and Bao, Han and Ge, Zheng and Yang, Jinrong and Sun, Jianjian and Li, Zeming},\n  journal={arXiv preprint arXiv:2209.10248},\n  year={2022}\n}\n@article{zhou2022matrixvt,\n  title={MatrixVT: Efficient Multi-Camera to BEV Transformation for 3D Perception},\n  author={Zhou, Hongyu and Ge, Zheng and Li, Zeming and Zhang, Xiangyu},\n  journal={arXiv preprint arXiv:2211.10593},\n  year={2022}\n}\n```\n"
  },
  {
    "path": "bevdepth/callbacks/ema.py",
    "content": "#!/usr/bin/env python3\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\nimport math\nimport os\nfrom copy import deepcopy\n\nimport torch\nimport torch.nn as nn\nfrom pytorch_lightning.callbacks import Callback\n\n__all__ = ['ModelEMA', 'is_parallel']\n\n\ndef is_parallel(model):\n    \"\"\"check if model is in parallel mode.\"\"\"\n    parallel_type = (\n        nn.parallel.DataParallel,\n        nn.parallel.DistributedDataParallel,\n    )\n    return isinstance(model, parallel_type)\n\n\nclass ModelEMA:\n    \"\"\"\n    Model Exponential Moving Average from https://github.com/rwightman/\n    pytorch-image-models Keep a moving average of everything in\n    the model state_dict (parameters and buffers).\n    This is intended to allow functionality like\n    https://www.tensorflow.org/api_docs/python/tf/train/\n    ExponentialMovingAverage\n    A smoothed version of the weights is necessary for some training\n    schemes to perform well.\n    This class is sensitive where it is initialized in the sequence\n    of model init, GPU assignment and distributed training wrappers.\n    \"\"\"\n\n    def __init__(self, model, decay=0.9999, updates=0):\n        \"\"\"\n        Args:\n            model (nn.Module): model to apply EMA.\n            decay (float): ema decay.\n            updates (int): counter of EMA updates.\n        \"\"\"\n        # Create EMA(FP32)\n        self.ema = deepcopy(\n            model.module if is_parallel(model) else model).eval()\n        self.updates = updates\n        # decay exponential ramp (to help early epochs)\n        self.decay = lambda x: decay * (1 - math.exp(-x / 2000))\n        for p in self.ema.parameters():\n            p.requires_grad_(False)\n\n    def update(self, trainer, model):\n        # Update EMA parameters\n        with torch.no_grad():\n            self.updates += 1\n            d = self.decay(self.updates)\n\n            msd = model.module.state_dict() if is_parallel(\n                model) else model.state_dict()  # model state_dict\n            for k, v in self.ema.state_dict().items():\n                if v.dtype.is_floating_point:\n                    v *= d\n                    v += (1.0 - d) * msd[k].detach()\n\n\nclass EMACallback(Callback):\n\n    def __init__(self, len_updates) -> None:\n        super().__init__()\n        self.len_updates = len_updates\n\n    def on_fit_start(self, trainer, pl_module):\n        # Todo (@lizeming@megvii.com): delete manually specified device\n        from torch.nn.modules.batchnorm import SyncBatchNorm\n\n        bn_model_list = list()\n        bn_model_dist_group_list = list()\n        for model_ref in trainer.model.modules():\n            if isinstance(model_ref, SyncBatchNorm):\n                bn_model_list.append(model_ref)\n                bn_model_dist_group_list.append(model_ref.process_group)\n                model_ref.process_group = None\n        trainer.ema_model = ModelEMA(trainer.model.module.module.model.cuda(),\n                                     0.9990)\n\n        for bn_model, dist_group in zip(bn_model_list,\n                                        bn_model_dist_group_list):\n            bn_model.process_group = dist_group\n        trainer.ema_model.updates = self.len_updates\n\n    def on_train_batch_end(self,\n                           trainer,\n                           pl_module,\n                           outputs,\n                           batch,\n                           batch_idx,\n                           unused=0):\n        
trainer.ema_model.update(trainer, trainer.model.module.module.model)\n\n    def on_train_epoch_end(self, trainer, pl_module) -> None:\n        state_dict = trainer.ema_model.ema.state_dict()\n        state_dict_keys = list(state_dict.keys())\n        # TODO: Change to more elegant way.\n        for state_dict_key in state_dict_keys:\n            new_key = 'model.' + state_dict_key\n            state_dict[new_key] = state_dict.pop(state_dict_key)\n        checkpoint = {\n            # the epoch and global step are saved for\n            # compatibility but they are not relevant for restoration\n            'epoch': trainer.current_epoch,\n            'global_step': trainer.global_step,\n            'state_dict': state_dict\n        }\n        torch.save(\n            checkpoint,\n            os.path.join(trainer.log_dir, f'{trainer.current_epoch}.pth'))\n"
  },
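  {
    "path": "examples/sketch_ema_usage.py",
    "content": "# Illustrative sketch only (hypothetical path, not part of the original\n# release): drive ModelEMA from bevdepth/callbacks/ema.py by hand on a toy\n# torch model, assuming torch and pytorch_lightning are importable.\nimport torch\nimport torch.nn as nn\n\nfrom bevdepth.callbacks.ema import ModelEMA\n\n# Toy network standing in for the detector; ModelEMA keeps a smoothed copy.\nmodel = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))\nema = ModelEMA(model, decay=0.999)\n\noptimizer = torch.optim.SGD(model.parameters(), lr=0.1)\nfor step in range(10):\n    x = torch.randn(16, 4)\n    loss = model(x).pow(2).mean()\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n    # `trainer` is unused inside ModelEMA.update, so None is enough here.\n    ema.update(None, model)\n\n# ema.ema holds the exponentially averaged weights; the effective decay\n# ramps up with the update counter via decay * (1 - exp(-updates / 2000)).\nprint(ema.updates, ema.decay(ema.updates))\n"
  },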
  {
    "path": "bevdepth/datasets/nusc_det_dataset.py",
    "content": "import os\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmdet3d.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes\nfrom nuscenes.utils.data_classes import Box, LidarPointCloud\nfrom nuscenes.utils.geometry_utils import view_points\nfrom PIL import Image\nfrom pyquaternion import Quaternion\nfrom torch.utils.data import Dataset\n\n__all__ = ['NuscDetDataset']\n\nmap_name_from_general_to_detection = {\n    'human.pedestrian.adult': 'pedestrian',\n    'human.pedestrian.child': 'pedestrian',\n    'human.pedestrian.wheelchair': 'ignore',\n    'human.pedestrian.stroller': 'ignore',\n    'human.pedestrian.personal_mobility': 'ignore',\n    'human.pedestrian.police_officer': 'pedestrian',\n    'human.pedestrian.construction_worker': 'pedestrian',\n    'animal': 'ignore',\n    'vehicle.car': 'car',\n    'vehicle.motorcycle': 'motorcycle',\n    'vehicle.bicycle': 'bicycle',\n    'vehicle.bus.bendy': 'bus',\n    'vehicle.bus.rigid': 'bus',\n    'vehicle.truck': 'truck',\n    'vehicle.construction': 'construction_vehicle',\n    'vehicle.emergency.ambulance': 'ignore',\n    'vehicle.emergency.police': 'ignore',\n    'vehicle.trailer': 'trailer',\n    'movable_object.barrier': 'barrier',\n    'movable_object.trafficcone': 'traffic_cone',\n    'movable_object.pushable_pullable': 'ignore',\n    'movable_object.debris': 'ignore',\n    'static_object.bicycle_rack': 'ignore',\n}\n\n\ndef get_rot(h):\n    return torch.Tensor([\n        [np.cos(h), np.sin(h)],\n        [-np.sin(h), np.cos(h)],\n    ])\n\n\ndef img_transform(img, resize, resize_dims, crop, flip, rotate):\n    ida_rot = torch.eye(2)\n    ida_tran = torch.zeros(2)\n    # adjust image\n    img = img.resize(resize_dims)\n    img = img.crop(crop)\n    if flip:\n        img = img.transpose(method=Image.FLIP_LEFT_RIGHT)\n    img = img.rotate(rotate)\n\n    # post-homography transformation\n    ida_rot *= resize\n    ida_tran -= torch.Tensor(crop[:2])\n    if flip:\n        A = torch.Tensor([[-1, 0], [0, 1]])\n        b = torch.Tensor([crop[2] - crop[0], 0])\n        ida_rot = A.matmul(ida_rot)\n        ida_tran = A.matmul(ida_tran) + b\n    A = get_rot(rotate / 180 * np.pi)\n    b = torch.Tensor([crop[2] - crop[0], crop[3] - crop[1]]) / 2\n    b = A.matmul(-b) + b\n    ida_rot = A.matmul(ida_rot)\n    ida_tran = A.matmul(ida_tran) + b\n    ida_mat = ida_rot.new_zeros(4, 4)\n    ida_mat[3, 3] = 1\n    ida_mat[2, 2] = 1\n    ida_mat[:2, :2] = ida_rot\n    ida_mat[:2, 3] = ida_tran\n    return img, ida_mat\n\n\ndef bev_transform(gt_boxes, rotate_angle, scale_ratio, flip_dx, flip_dy):\n    rotate_angle = torch.tensor(rotate_angle / 180 * np.pi)\n    rot_sin = torch.sin(rotate_angle)\n    rot_cos = torch.cos(rotate_angle)\n    rot_mat = torch.Tensor([[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0],\n                            [0, 0, 1]])\n    scale_mat = torch.Tensor([[scale_ratio, 0, 0], [0, scale_ratio, 0],\n                              [0, 0, scale_ratio]])\n    flip_mat = torch.Tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n    if flip_dx:\n        flip_mat = flip_mat @ torch.Tensor([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])\n    if flip_dy:\n        flip_mat = flip_mat @ torch.Tensor([[1, 0, 0], [0, -1, 0], [0, 0, 1]])\n    rot_mat = flip_mat @ (scale_mat @ rot_mat)\n    if gt_boxes.shape[0] > 0:\n        gt_boxes[:, :3] = (rot_mat @ gt_boxes[:, :3].unsqueeze(-1)).squeeze(-1)\n        gt_boxes[:, 3:6] *= scale_ratio\n        gt_boxes[:, 6] += rotate_angle\n        if flip_dx:\n            gt_boxes[:, 6] = 2 * 
torch.asin(torch.tensor(1.0)) - gt_boxes[:, 6]\n        if flip_dy:\n            gt_boxes[:, 6] = -gt_boxes[:, 6]\n        gt_boxes[:, 7:] = (\n            rot_mat[:2, :2] @ gt_boxes[:, 7:].unsqueeze(-1)).squeeze(-1)\n    return gt_boxes, rot_mat\n\n\ndef depth_transform(cam_depth, resize, resize_dims, crop, flip, rotate):\n    \"\"\"Transform depth based on ida augmentation configuration.\n\n    Args:\n        cam_depth (np array): Nx3, 3: x,y,d.\n        resize (float): Resize factor.\n        resize_dims (list): Final dimension.\n        crop (list): x1, y1, x2, y2\n        flip (bool): Whether to flip.\n        rotate (float): Rotation value.\n\n    Returns:\n        np array: [h/down_ratio, w/down_ratio, d]\n    \"\"\"\n\n    H, W = resize_dims\n    cam_depth[:, :2] = cam_depth[:, :2] * resize\n    cam_depth[:, 0] -= crop[0]\n    cam_depth[:, 1] -= crop[1]\n    if flip:\n        cam_depth[:, 0] = resize_dims[1] - cam_depth[:, 0]\n\n    cam_depth[:, 0] -= W / 2.0\n    cam_depth[:, 1] -= H / 2.0\n\n    h = rotate / 180 * np.pi\n    rot_matrix = [\n        [np.cos(h), np.sin(h)],\n        [-np.sin(h), np.cos(h)],\n    ]\n    cam_depth[:, :2] = np.matmul(rot_matrix, cam_depth[:, :2].T).T\n\n    cam_depth[:, 0] += W / 2.0\n    cam_depth[:, 1] += H / 2.0\n\n    depth_coords = cam_depth[:, :2].astype(np.int16)\n\n    depth_map = np.zeros(resize_dims)\n    valid_mask = ((depth_coords[:, 1] < resize_dims[0])\n                  & (depth_coords[:, 0] < resize_dims[1])\n                  & (depth_coords[:, 1] >= 0)\n                  & (depth_coords[:, 0] >= 0))\n    depth_map[depth_coords[valid_mask, 1],\n              depth_coords[valid_mask, 0]] = cam_depth[valid_mask, 2]\n\n    return torch.Tensor(depth_map)\n\n\ndef map_pointcloud_to_image(\n    lidar_points,\n    img,\n    lidar_calibrated_sensor,\n    lidar_ego_pose,\n    cam_calibrated_sensor,\n    cam_ego_pose,\n    min_dist: float = 0.0,\n):\n\n    # Points live in the point sensor frame. 
So they need to be\n    # transformed via global to the image plane.\n    # First step: transform the pointcloud to the ego vehicle\n    # frame for the timestamp of the sweep.\n\n    lidar_points = LidarPointCloud(lidar_points.T)\n    lidar_points.rotate(\n        Quaternion(lidar_calibrated_sensor['rotation']).rotation_matrix)\n    lidar_points.translate(np.array(lidar_calibrated_sensor['translation']))\n\n    # Second step: transform from ego to the global frame.\n    lidar_points.rotate(Quaternion(lidar_ego_pose['rotation']).rotation_matrix)\n    lidar_points.translate(np.array(lidar_ego_pose['translation']))\n\n    # Third step: transform from global into the ego vehicle\n    # frame for the timestamp of the image.\n    lidar_points.translate(-np.array(cam_ego_pose['translation']))\n    lidar_points.rotate(Quaternion(cam_ego_pose['rotation']).rotation_matrix.T)\n\n    # Fourth step: transform from ego into the camera.\n    lidar_points.translate(-np.array(cam_calibrated_sensor['translation']))\n    lidar_points.rotate(\n        Quaternion(cam_calibrated_sensor['rotation']).rotation_matrix.T)\n\n    # Fifth step: actually take a \"picture\" of the point cloud.\n    # Grab the depths (camera frame z axis points away from the camera).\n    depths = lidar_points.points[2, :]\n    coloring = depths\n\n    # Take the actual picture (matrix multiplication with camera-matrix\n    # + renormalization).\n    points = view_points(lidar_points.points[:3, :],\n                         np.array(cam_calibrated_sensor['camera_intrinsic']),\n                         normalize=True)\n\n    # Remove points that are either outside or behind the camera.\n    # Leave a margin of 1 pixel for aesthetic reasons. Also make\n    # sure points are at least 1m in front of the camera to avoid\n    # seeing the lidar points on the camera casing for non-keyframes\n    # which are slightly out of sync.\n    mask = np.ones(depths.shape[0], dtype=bool)\n    mask = np.logical_and(mask, depths > min_dist)\n    mask = np.logical_and(mask, points[0, :] > 1)\n    mask = np.logical_and(mask, points[0, :] < img.size[0] - 1)\n    mask = np.logical_and(mask, points[1, :] > 1)\n    mask = np.logical_and(mask, points[1, :] < img.size[1] - 1)\n    points = points[:, mask]\n    coloring = coloring[mask]\n\n    return points, coloring\n\n\nclass NuscDetDataset(Dataset):\n\n    def __init__(self,\n                 ida_aug_conf,\n                 bda_aug_conf,\n                 classes,\n                 data_root,\n                 info_paths,\n                 is_train,\n                 use_cbgs=False,\n                 num_sweeps=1,\n                 img_conf=dict(img_mean=[123.675, 116.28, 103.53],\n                               img_std=[58.395, 57.12, 57.375],\n                               to_rgb=True),\n                 return_depth=False,\n                 sweep_idxes=list(),\n                 key_idxes=list(),\n                 use_fusion=False):\n        \"\"\"Dataset used for bevdetection task.\n        Args:\n            ida_aug_conf (dict): Config for ida augmentation.\n            bda_aug_conf (dict): Config for bda augmentation.\n            classes (list): Class names.\n            use_cbgs (bool): Whether to use cbgs strategy,\n                Default: False.\n            num_sweeps (int): Number of sweeps to be used for each sample.\n                default: 1.\n            img_conf (dict): Config for image.\n            return_depth (bool): Whether to use depth gt.\n                default: False.\n            
sweep_idxes (list): List of sweep idxes to be used.\n                default: list().\n            key_idxes (list): List of key idxes to be used.\n                default: list().\n            use_fusion (bool): Whether to use lidar data.\n                default: False.\n        \"\"\"\n        super().__init__()\n        if isinstance(info_paths, list):\n            self.infos = list()\n            for info_path in info_paths:\n                self.infos.extend(mmcv.load(info_path))\n        else:\n            self.infos = mmcv.load(info_paths)\n        self.is_train = is_train\n        self.ida_aug_conf = ida_aug_conf\n        self.bda_aug_conf = bda_aug_conf\n        self.data_root = data_root\n        self.classes = classes\n        self.use_cbgs = use_cbgs\n        if self.use_cbgs:\n            self.cat2id = {name: i for i, name in enumerate(self.classes)}\n            self.sample_indices = self._get_sample_indices()\n        self.num_sweeps = num_sweeps\n        self.img_mean = np.array(img_conf['img_mean'], np.float32)\n        self.img_std = np.array(img_conf['img_std'], np.float32)\n        self.to_rgb = img_conf['to_rgb']\n        self.return_depth = return_depth\n        assert sum([sweep_idx >= 0 for sweep_idx in sweep_idxes]) \\\n            == len(sweep_idxes), 'All `sweep_idxes` must greater \\\n                than or equal to 0.'\n\n        self.sweeps_idx = sweep_idxes\n        assert sum([key_idx < 0 for key_idx in key_idxes]) == len(key_idxes),\\\n            'All `key_idxes` must less than 0.'\n        self.key_idxes = [0] + key_idxes\n        self.use_fusion = use_fusion\n\n    def _get_sample_indices(self):\n        \"\"\"Load annotations from ann_file.\n\n        Args:\n            ann_file (str): Path of the annotation file.\n\n        Returns:\n            list[dict]: List of annotations after class sampling.\n        \"\"\"\n        class_sample_idxs = {cat_id: [] for cat_id in self.cat2id.values()}\n        for idx, info in enumerate(self.infos):\n            gt_names = set(\n                [ann_info['category_name'] for ann_info in info['ann_infos']])\n            for gt_name in gt_names:\n                gt_name = map_name_from_general_to_detection[gt_name]\n                if gt_name not in self.classes:\n                    continue\n                class_sample_idxs[self.cat2id[gt_name]].append(idx)\n        duplicated_samples = sum(\n            [len(v) for _, v in class_sample_idxs.items()])\n        class_distribution = {\n            k: len(v) / duplicated_samples\n            for k, v in class_sample_idxs.items()\n        }\n\n        sample_indices = []\n\n        frac = 1.0 / len(self.classes)\n        ratios = [frac / v for v in class_distribution.values()]\n        for cls_inds, ratio in zip(list(class_sample_idxs.values()), ratios):\n            sample_indices += np.random.choice(cls_inds,\n                                               int(len(cls_inds) *\n                                                   ratio)).tolist()\n        return sample_indices\n\n    def sample_ida_augmentation(self):\n        \"\"\"Generate ida augmentation values based on ida_config.\"\"\"\n        H, W = self.ida_aug_conf['H'], self.ida_aug_conf['W']\n        fH, fW = self.ida_aug_conf['final_dim']\n        if self.is_train:\n            resize = np.random.uniform(*self.ida_aug_conf['resize_lim'])\n            resize_dims = (int(W * resize), int(H * resize))\n            newW, newH = resize_dims\n            crop_h = int(\n                (1 - 
np.random.uniform(*self.ida_aug_conf['bot_pct_lim'])) *\n                newH) - fH\n            crop_w = int(np.random.uniform(0, max(0, newW - fW)))\n            crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)\n            flip = False\n            if self.ida_aug_conf['rand_flip'] and np.random.choice([0, 1]):\n                flip = True\n            rotate_ida = np.random.uniform(*self.ida_aug_conf['rot_lim'])\n        else:\n            resize = max(fH / H, fW / W)\n            resize_dims = (int(W * resize), int(H * resize))\n            newW, newH = resize_dims\n            crop_h = int(\n                (1 - np.mean(self.ida_aug_conf['bot_pct_lim'])) * newH) - fH\n            crop_w = int(max(0, newW - fW) / 2)\n            crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)\n            flip = False\n            rotate_ida = 0\n        return resize, resize_dims, crop, flip, rotate_ida\n\n    def sample_bda_augmentation(self):\n        \"\"\"Generate bda augmentation values based on bda_config.\"\"\"\n        if self.is_train:\n            rotate_bda = np.random.uniform(*self.bda_aug_conf['rot_lim'])\n            scale_bda = np.random.uniform(*self.bda_aug_conf['scale_lim'])\n            flip_dx = np.random.uniform() < self.bda_aug_conf['flip_dx_ratio']\n            flip_dy = np.random.uniform() < self.bda_aug_conf['flip_dy_ratio']\n        else:\n            rotate_bda = 0\n            scale_bda = 1.0\n            flip_dx = False\n            flip_dy = False\n        return rotate_bda, scale_bda, flip_dx, flip_dy\n\n    def get_lidar_depth(self, lidar_points, img, lidar_info, cam_info):\n        lidar_calibrated_sensor = lidar_info['LIDAR_TOP']['calibrated_sensor']\n        lidar_ego_pose = lidar_info['LIDAR_TOP']['ego_pose']\n        cam_calibrated_sensor = cam_info['calibrated_sensor']\n        cam_ego_pose = cam_info['ego_pose']\n        pts_img, depth = map_pointcloud_to_image(\n            lidar_points.copy(), img, lidar_calibrated_sensor.copy(),\n            lidar_ego_pose.copy(), cam_calibrated_sensor, cam_ego_pose)\n        return np.concatenate([pts_img[:2, :].T, depth[:, None]],\n                              axis=1).astype(np.float32)\n\n    def get_image(self, cam_infos, cams, lidar_infos=None):\n        \"\"\"Given data and cam_names, return image data needed.\n\n        Args:\n            sweeps_data (list): Raw data used to generate the data we needed.\n            cams (list): Camera names.\n\n        Returns:\n            Tensor: Image data after processing.\n            Tensor: Transformation matrix from camera to ego.\n            Tensor: Intrinsic matrix.\n            Tensor: Transformation matrix for ida.\n            Tensor: Transformation matrix from key\n                frame camera to sweep frame camera.\n            Tensor: timestamps.\n            dict: meta infos needed for evaluation.\n        \"\"\"\n        assert len(cam_infos) > 0\n        sweep_imgs = list()\n        sweep_sensor2ego_mats = list()\n        sweep_intrin_mats = list()\n        sweep_ida_mats = list()\n        sweep_sensor2sensor_mats = list()\n        sweep_timestamps = list()\n        sweep_lidar_depth = list()\n        if self.return_depth or self.use_fusion:\n            sweep_lidar_points = list()\n            for lidar_info in lidar_infos:\n                lidar_path = lidar_info['LIDAR_TOP']['filename']\n                lidar_points = np.fromfile(os.path.join(\n                    self.data_root, lidar_path),\n                                           dtype=np.float32,\n    
                                       count=-1).reshape(-1, 5)[..., :4]\n                sweep_lidar_points.append(lidar_points)\n        for cam in cams:\n            imgs = list()\n            sensor2ego_mats = list()\n            intrin_mats = list()\n            ida_mats = list()\n            sensor2sensor_mats = list()\n            timestamps = list()\n            lidar_depth = list()\n            key_info = cam_infos[0]\n            resize, resize_dims, crop, flip, \\\n                rotate_ida = self.sample_ida_augmentation(\n                    )\n            for sweep_idx, cam_info in enumerate(cam_infos):\n\n                img = Image.open(\n                    os.path.join(self.data_root, cam_info[cam]['filename']))\n                # img = Image.fromarray(img)\n                w, x, y, z = cam_info[cam]['calibrated_sensor']['rotation']\n                # sweep sensor to sweep ego\n                sweepsensor2sweepego_rot = torch.Tensor(\n                    Quaternion(w, x, y, z).rotation_matrix)\n                sweepsensor2sweepego_tran = torch.Tensor(\n                    cam_info[cam]['calibrated_sensor']['translation'])\n                sweepsensor2sweepego = sweepsensor2sweepego_rot.new_zeros(\n                    (4, 4))\n                sweepsensor2sweepego[3, 3] = 1\n                sweepsensor2sweepego[:3, :3] = sweepsensor2sweepego_rot\n                sweepsensor2sweepego[:3, -1] = sweepsensor2sweepego_tran\n                # sweep ego to global\n                w, x, y, z = cam_info[cam]['ego_pose']['rotation']\n                sweepego2global_rot = torch.Tensor(\n                    Quaternion(w, x, y, z).rotation_matrix)\n                sweepego2global_tran = torch.Tensor(\n                    cam_info[cam]['ego_pose']['translation'])\n                sweepego2global = sweepego2global_rot.new_zeros((4, 4))\n                sweepego2global[3, 3] = 1\n                sweepego2global[:3, :3] = sweepego2global_rot\n                sweepego2global[:3, -1] = sweepego2global_tran\n\n                # global sensor to cur ego\n                w, x, y, z = key_info[cam]['ego_pose']['rotation']\n                keyego2global_rot = torch.Tensor(\n                    Quaternion(w, x, y, z).rotation_matrix)\n                keyego2global_tran = torch.Tensor(\n                    key_info[cam]['ego_pose']['translation'])\n                keyego2global = keyego2global_rot.new_zeros((4, 4))\n                keyego2global[3, 3] = 1\n                keyego2global[:3, :3] = keyego2global_rot\n                keyego2global[:3, -1] = keyego2global_tran\n                global2keyego = keyego2global.inverse()\n\n                # cur ego to sensor\n                w, x, y, z = key_info[cam]['calibrated_sensor']['rotation']\n                keysensor2keyego_rot = torch.Tensor(\n                    Quaternion(w, x, y, z).rotation_matrix)\n                keysensor2keyego_tran = torch.Tensor(\n                    key_info[cam]['calibrated_sensor']['translation'])\n                keysensor2keyego = keysensor2keyego_rot.new_zeros((4, 4))\n                keysensor2keyego[3, 3] = 1\n                keysensor2keyego[:3, :3] = keysensor2keyego_rot\n                keysensor2keyego[:3, -1] = keysensor2keyego_tran\n                keyego2keysensor = keysensor2keyego.inverse()\n                keysensor2sweepsensor = (\n                    keyego2keysensor @ global2keyego @ sweepego2global\n                    @ sweepsensor2sweepego).inverse()\n                sweepsensor2keyego = 
global2keyego @ sweepego2global @\\\n                    sweepsensor2sweepego\n                sensor2ego_mats.append(sweepsensor2keyego)\n                sensor2sensor_mats.append(keysensor2sweepsensor)\n                intrin_mat = torch.zeros((4, 4))\n                intrin_mat[3, 3] = 1\n                intrin_mat[:3, :3] = torch.Tensor(\n                    cam_info[cam]['calibrated_sensor']['camera_intrinsic'])\n                if self.return_depth and (self.use_fusion or sweep_idx == 0):\n                    point_depth = self.get_lidar_depth(\n                        sweep_lidar_points[sweep_idx], img,\n                        lidar_infos[sweep_idx], cam_info[cam])\n                    point_depth_augmented = depth_transform(\n                        point_depth, resize, self.ida_aug_conf['final_dim'],\n                        crop, flip, rotate_ida)\n                    lidar_depth.append(point_depth_augmented)\n                img, ida_mat = img_transform(\n                    img,\n                    resize=resize,\n                    resize_dims=resize_dims,\n                    crop=crop,\n                    flip=flip,\n                    rotate=rotate_ida,\n                )\n                ida_mats.append(ida_mat)\n                img = mmcv.imnormalize(np.array(img), self.img_mean,\n                                       self.img_std, self.to_rgb)\n                img = torch.from_numpy(img).permute(2, 0, 1)\n                imgs.append(img)\n                intrin_mats.append(intrin_mat)\n                timestamps.append(cam_info[cam]['timestamp'])\n            sweep_imgs.append(torch.stack(imgs))\n            sweep_sensor2ego_mats.append(torch.stack(sensor2ego_mats))\n            sweep_intrin_mats.append(torch.stack(intrin_mats))\n            sweep_ida_mats.append(torch.stack(ida_mats))\n            sweep_sensor2sensor_mats.append(torch.stack(sensor2sensor_mats))\n            sweep_timestamps.append(torch.tensor(timestamps))\n            if self.return_depth:\n                sweep_lidar_depth.append(torch.stack(lidar_depth))\n        # Get mean pose of all cams.\n        ego2global_rotation = np.mean(\n            [key_info[cam]['ego_pose']['rotation'] for cam in cams], 0)\n        ego2global_translation = np.mean(\n            [key_info[cam]['ego_pose']['translation'] for cam in cams], 0)\n        img_metas = dict(\n            box_type_3d=LiDARInstance3DBoxes,\n            ego2global_translation=ego2global_translation,\n            ego2global_rotation=ego2global_rotation,\n        )\n\n        ret_list = [\n            torch.stack(sweep_imgs).permute(1, 0, 2, 3, 4),\n            torch.stack(sweep_sensor2ego_mats).permute(1, 0, 2, 3),\n            torch.stack(sweep_intrin_mats).permute(1, 0, 2, 3),\n            torch.stack(sweep_ida_mats).permute(1, 0, 2, 3),\n            torch.stack(sweep_sensor2sensor_mats).permute(1, 0, 2, 3),\n            torch.stack(sweep_timestamps).permute(1, 0),\n            img_metas,\n        ]\n        if self.return_depth:\n            ret_list.append(torch.stack(sweep_lidar_depth).permute(1, 0, 2, 3))\n        return ret_list\n\n    def get_gt(self, info, cams):\n        \"\"\"Generate gt labels from info.\n\n        Args:\n            info(dict): Infos needed to generate gt labels.\n            cams(list): Camera names.\n\n        Returns:\n            Tensor: GT bboxes.\n            Tensor: GT labels.\n        \"\"\"\n        ego2global_rotation = np.mean(\n            [info['cam_infos'][cam]['ego_pose']['rotation'] for cam in 
cams],\n            0)\n        ego2global_translation = np.mean([\n            info['cam_infos'][cam]['ego_pose']['translation'] for cam in cams\n        ], 0)\n        trans = -np.array(ego2global_translation)\n        rot = Quaternion(ego2global_rotation).inverse\n        gt_boxes = list()\n        gt_labels = list()\n        for ann_info in info['ann_infos']:\n            # Use ego coordinate.\n            if (map_name_from_general_to_detection[ann_info['category_name']]\n                    not in self.classes\n                    or ann_info['num_lidar_pts'] + ann_info['num_radar_pts'] <=\n                    0):\n                continue\n            box = Box(\n                ann_info['translation'],\n                ann_info['size'],\n                Quaternion(ann_info['rotation']),\n                velocity=ann_info['velocity'],\n            )\n            box.translate(trans)\n            box.rotate(rot)\n            box_xyz = np.array(box.center)\n            box_dxdydz = np.array(box.wlh)[[1, 0, 2]]\n            box_yaw = np.array([box.orientation.yaw_pitch_roll[0]])\n            box_velo = np.array(box.velocity[:2])\n            gt_box = np.concatenate([box_xyz, box_dxdydz, box_yaw, box_velo])\n            gt_boxes.append(gt_box)\n            gt_labels.append(\n                self.classes.index(map_name_from_general_to_detection[\n                    ann_info['category_name']]))\n        return torch.Tensor(gt_boxes), torch.tensor(gt_labels)\n\n    def choose_cams(self):\n        \"\"\"Choose cameras randomly.\n\n        Returns:\n            list: Cameras to be used.\n        \"\"\"\n        if self.is_train and self.ida_aug_conf['Ncams'] < len(\n                self.ida_aug_conf['cams']):\n            cams = np.random.choice(self.ida_aug_conf['cams'],\n                                    self.ida_aug_conf['Ncams'],\n                                    replace=False)\n        else:\n            cams = self.ida_aug_conf['cams']\n        return cams\n\n    def __getitem__(self, idx):\n        if self.use_cbgs:\n            idx = self.sample_indices[idx]\n        cam_infos = list()\n        lidar_infos = list()\n        # TODO: Check if it still works when number of cameras is reduced.\n        cams = self.choose_cams()\n        for key_idx in self.key_idxes:\n            cur_idx = key_idx + idx\n            # Handle scenarios when current idx doesn't have previous key\n            # frame or previous key frame is from another scene.\n            if cur_idx < 0:\n                cur_idx = idx\n            elif self.infos[cur_idx]['scene_token'] != self.infos[idx][\n                    'scene_token']:\n                cur_idx = idx\n            info = self.infos[cur_idx]\n            cam_infos.append(info['cam_infos'])\n            lidar_infos.append(info['lidar_infos'])\n            lidar_sweep_timestamps = [\n                lidar_sweep['LIDAR_TOP']['timestamp']\n                for lidar_sweep in info['lidar_sweeps']\n            ]\n            for sweep_idx in self.sweeps_idx:\n                if len(info['cam_sweeps']) == 0:\n                    cam_infos.append(info['cam_infos'])\n                    lidar_infos.append(info['lidar_infos'])\n                else:\n                    # Handle scenarios when current sweep doesn't have all\n                    # cam keys.\n                    for i in range(min(len(info['cam_sweeps']) - 1, sweep_idx),\n                                   -1, -1):\n                        if sum([cam in info['cam_sweeps'][i]\n                
                for cam in cams]) == len(cams):\n                            cam_infos.append(info['cam_sweeps'][i])\n                            cam_timestamp = np.mean([\n                                val['timestamp']\n                                for val in info['cam_sweeps'][i].values()\n                            ])\n                            # Find the closest lidar frame to the cam frame.\n                            lidar_idx = np.abs(lidar_sweep_timestamps -\n                                               cam_timestamp).argmin()\n                            lidar_infos.append(info['lidar_sweeps'][lidar_idx])\n                            break\n        if self.return_depth or self.use_fusion:\n            image_data_list = self.get_image(cam_infos, cams, lidar_infos)\n\n        else:\n            image_data_list = self.get_image(cam_infos, cams)\n        ret_list = list()\n        (\n            sweep_imgs,\n            sweep_sensor2ego_mats,\n            sweep_intrins,\n            sweep_ida_mats,\n            sweep_sensor2sensor_mats,\n            sweep_timestamps,\n            img_metas,\n        ) = image_data_list[:7]\n        img_metas['token'] = self.infos[idx]['sample_token']\n        if self.is_train:\n            gt_boxes, gt_labels = self.get_gt(self.infos[idx], cams)\n        # Temporary solution for test.\n        else:\n            gt_boxes = sweep_imgs.new_zeros(0, 7)\n            gt_labels = sweep_imgs.new_zeros(0, )\n\n        rotate_bda, scale_bda, flip_dx, flip_dy = self.sample_bda_augmentation(\n        )\n        bda_mat = sweep_imgs.new_zeros(4, 4)\n        bda_mat[3, 3] = 1\n        gt_boxes, bda_rot = bev_transform(gt_boxes, rotate_bda, scale_bda,\n                                          flip_dx, flip_dy)\n        bda_mat[:3, :3] = bda_rot\n        ret_list = [\n            sweep_imgs,\n            sweep_sensor2ego_mats,\n            sweep_intrins,\n            sweep_ida_mats,\n            sweep_sensor2sensor_mats,\n            bda_mat,\n            sweep_timestamps,\n            img_metas,\n            gt_boxes,\n            gt_labels,\n        ]\n        if self.return_depth:\n            ret_list.append(image_data_list[7])\n        return ret_list\n\n    def __str__(self):\n        return f\"\"\"NuscData: {len(self)} samples. 
Split: \\\n            {\"train\" if self.is_train else \"val\"}.\n                    Augmentation Conf: {self.ida_aug_conf}\"\"\"\n\n    def __len__(self):\n        if self.use_cbgs:\n            return len(self.sample_indices)\n        else:\n            return len(self.infos)\n\n\ndef collate_fn(data, is_return_depth=False):\n    imgs_batch = list()\n    sensor2ego_mats_batch = list()\n    intrin_mats_batch = list()\n    ida_mats_batch = list()\n    sensor2sensor_mats_batch = list()\n    bda_mat_batch = list()\n    timestamps_batch = list()\n    gt_boxes_batch = list()\n    gt_labels_batch = list()\n    img_metas_batch = list()\n    depth_labels_batch = list()\n    for iter_data in data:\n        (\n            sweep_imgs,\n            sweep_sensor2ego_mats,\n            sweep_intrins,\n            sweep_ida_mats,\n            sweep_sensor2sensor_mats,\n            bda_mat,\n            sweep_timestamps,\n            img_metas,\n            gt_boxes,\n            gt_labels,\n        ) = iter_data[:10]\n        if is_return_depth:\n            gt_depth = iter_data[10]\n            depth_labels_batch.append(gt_depth)\n        imgs_batch.append(sweep_imgs)\n        sensor2ego_mats_batch.append(sweep_sensor2ego_mats)\n        intrin_mats_batch.append(sweep_intrins)\n        ida_mats_batch.append(sweep_ida_mats)\n        sensor2sensor_mats_batch.append(sweep_sensor2sensor_mats)\n        bda_mat_batch.append(bda_mat)\n        timestamps_batch.append(sweep_timestamps)\n        img_metas_batch.append(img_metas)\n        gt_boxes_batch.append(gt_boxes)\n        gt_labels_batch.append(gt_labels)\n    mats_dict = dict()\n    mats_dict['sensor2ego_mats'] = torch.stack(sensor2ego_mats_batch)\n    mats_dict['intrin_mats'] = torch.stack(intrin_mats_batch)\n    mats_dict['ida_mats'] = torch.stack(ida_mats_batch)\n    mats_dict['sensor2sensor_mats'] = torch.stack(sensor2sensor_mats_batch)\n    mats_dict['bda_mat'] = torch.stack(bda_mat_batch)\n    ret_list = [\n        torch.stack(imgs_batch),\n        mats_dict,\n        torch.stack(timestamps_batch),\n        img_metas_batch,\n        gt_boxes_batch,\n        gt_labels_batch,\n    ]\n    if is_return_depth:\n        ret_list.append(torch.stack(depth_labels_batch))\n    return ret_list\n"
  },
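  {
    "path": "examples/sketch_bev_transform.py",
    "content": "# Illustrative sketch only (hypothetical path, not part of the original\n# release): apply bev_transform from bevdepth/datasets/nusc_det_dataset.py\n# to one dummy ground-truth box, assuming the repo's dependencies (mmcv,\n# mmdet3d, nuscenes-devkit) are importable.\nimport torch\n\nfrom bevdepth.datasets.nusc_det_dataset import bev_transform\n\n# A single 9-dim box in the ego frame: x, y, z, dx, dy, dz, yaw, vx, vy.\ngt_boxes = torch.tensor([[10.0, 2.0, -1.0, 4.0, 1.8, 1.5, 0.3, 1.0, 0.0]])\n\n# Rotate by 10 degrees, scale by 1.05, flip along x, keep y unflipped.\nboxes_aug, bda_rot = bev_transform(gt_boxes.clone(), 10.0, 1.05, True, False)\n\n# bda_rot is the 3x3 rotation/scale/flip matrix; the dataset later embeds it\n# into the 4x4 bda_mat that is returned alongside the images.\nbda_mat = torch.eye(4)\nbda_mat[:3, :3] = bda_rot\nprint(boxes_aug)\nprint(bda_mat)\n"
  },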
  {
    "path": "bevdepth/evaluators/det_evaluators.py",
    "content": "'''Modified from # https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa\n'''\nimport os.path as osp\nimport tempfile\n\nimport mmcv\nimport numpy as np\nimport pyquaternion\nfrom nuscenes.utils.data_classes import Box\nfrom pyquaternion import Quaternion\n\n__all__ = ['DetNuscEvaluator']\n\n\nclass DetNuscEvaluator():\n    ErrNameMapping = {\n        'trans_err': 'mATE',\n        'scale_err': 'mASE',\n        'orient_err': 'mAOE',\n        'vel_err': 'mAVE',\n        'attr_err': 'mAAE',\n    }\n\n    DefaultAttribute = {\n        'car': 'vehicle.parked',\n        'pedestrian': 'pedestrian.moving',\n        'trailer': 'vehicle.parked',\n        'truck': 'vehicle.parked',\n        'bus': 'vehicle.moving',\n        'motorcycle': 'cycle.without_rider',\n        'construction_vehicle': 'vehicle.parked',\n        'bicycle': 'cycle.without_rider',\n        'barrier': '',\n        'traffic_cone': '',\n    }\n\n    def __init__(\n        self,\n        class_names,\n        eval_version='detection_cvpr_2019',\n        data_root='./data/nuScenes',\n        version='v1.0-trainval',\n        modality=dict(use_lidar=False,\n                      use_camera=True,\n                      use_radar=False,\n                      use_map=False,\n                      use_external=False),\n        output_dir=None,\n    ) -> None:\n        self.eval_version = eval_version\n        self.data_root = data_root\n        if self.eval_version is not None:\n            from nuscenes.eval.detection.config import config_factory\n\n            self.eval_detection_configs = config_factory(self.eval_version)\n        self.version = version\n        self.class_names = class_names\n        self.modality = modality\n        self.output_dir = output_dir\n\n    def _evaluate_single(self,\n                         result_path,\n                         logger=None,\n                         metric='bbox',\n                         result_name='pts_bbox'):\n        \"\"\"Evaluation for a single model in nuScenes protocol.\n\n        Args:\n            result_path (str): Path of the result file.\n            logger (logging.Logger | str | None): Logger used for printing\n                related information during evaluation. Default: None.\n            metric (str): Metric name used for evaluation. 
Default: 'bbox'.\n            result_name (str): Result name in the metric prefix.\n                Default: 'pts_bbox'.\n\n        Returns:\n            dict: Dictionary of evaluation details.\n        \"\"\"\n        from nuscenes import NuScenes\n        from nuscenes.eval.detection.evaluate import NuScenesEval\n\n        output_dir = osp.join(*osp.split(result_path)[:-1])\n        nusc = NuScenes(version=self.version,\n                        dataroot=self.data_root,\n                        verbose=False)\n        eval_set_map = {\n            'v1.0-mini': 'mini_val',\n            'v1.0-trainval': 'val',\n        }\n        nusc_eval = NuScenesEval(nusc,\n                                 config=self.eval_detection_configs,\n                                 result_path=result_path,\n                                 eval_set=eval_set_map[self.version],\n                                 output_dir=output_dir,\n                                 verbose=False)\n        nusc_eval.main(render_curves=False)\n\n        # record metrics\n        metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))\n        detail = dict()\n        metric_prefix = f'{result_name}_NuScenes'\n        for class_name in self.class_names:\n            for k, v in metrics['label_aps'][class_name].items():\n                val = float('{:.4f}'.format(v))\n                detail['{}/{}_AP_dist_{}'.format(metric_prefix, class_name,\n                                                 k)] = val\n            for k, v in metrics['label_tp_errors'][class_name].items():\n                val = float('{:.4f}'.format(v))\n                detail['{}/{}_{}'.format(metric_prefix, class_name, k)] = val\n            for k, v in metrics['tp_errors'].items():\n                val = float('{:.4f}'.format(v))\n                detail['{}/{}'.format(metric_prefix,\n                                      self.ErrNameMapping[k])] = val\n\n        detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']\n        detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']\n        return detail\n\n    def format_results(self,\n                       results,\n                       img_metas,\n                       result_names=['img_bbox'],\n                       jsonfile_prefix=None,\n                       **kwargs):\n        \"\"\"Format the results to json (standard format for COCO evaluation).\n\n        Args:\n            results (list[tuple | numpy.ndarray]): Testing results of the\n                dataset.\n            jsonfile_prefix (str | None): The prefix of json files. It includes\n                the file path and the prefix of filename, e.g., \"a/b/prefix\".\n                If not specified, a temp file will be created. Default: None.\n\n        Returns:\n            tuple: (result_files, tmp_dir), result_files is a dict containing \\\n                the json filepaths, tmp_dir is the temporal directory created \\\n                for saving json files when jsonfile_prefix is not specified.\n        \"\"\"\n        assert isinstance(results, list), 'results must be a list'\n\n        if jsonfile_prefix is None:\n            tmp_dir = tempfile.TemporaryDirectory()\n            jsonfile_prefix = osp.join(tmp_dir.name, 'results')\n        else:\n            tmp_dir = None\n\n        # currently the output prediction results could be in two formats\n        # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)\n        # 2. 
list of dict('pts_bbox' or 'img_bbox':\n        #     dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...))\n        # this is a workaround to enable evaluation of both formats on nuScenes\n        # refer to https://github.com/open-mmlab/mmdetection3d/issues/449\n        # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict\n        result_files = dict()\n        # refactor this.\n        for rasult_name in result_names:\n            # not evaluate 2D predictions on nuScenes\n            if '2d' in rasult_name:\n                continue\n            print(f'\\nFormating bboxes of {rasult_name}')\n            tmp_file_ = osp.join(jsonfile_prefix, rasult_name)\n            if self.output_dir:\n                result_files.update({\n                    rasult_name:\n                    self._format_bbox(results, img_metas, self.output_dir)\n                })\n            else:\n                result_files.update({\n                    rasult_name:\n                    self._format_bbox(results, img_metas, tmp_file_)\n                })\n        return result_files, tmp_dir\n\n    def evaluate(\n        self,\n        results,\n        img_metas,\n        metric='bbox',\n        logger=None,\n        jsonfile_prefix=None,\n        result_names=['img_bbox'],\n        show=False,\n        out_dir=None,\n        pipeline=None,\n    ):\n        \"\"\"Evaluation in nuScenes protocol.\n\n        Args:\n            results (list[dict]): Testing results of the dataset.\n            metric (str | list[str]): Metrics to be evaluated.\n            logger (logging.Logger | str | None): Logger used for printing\n                related information during evaluation. Default: None.\n            jsonfile_prefix (str | None): The prefix of json files. It includes\n                the file path and the prefix of filename, e.g., \"a/b/prefix\".\n                If not specified, a temp file will be created. Default: None.\n            show (bool): Whether to visualize.\n                Default: False.\n            out_dir (str): Path to save the visualization results.\n                Default: None.\n            pipeline (list[dict], optional): raw data loading for showing.\n                Default: None.\n\n        Returns:\n            dict[str, float]: Results of each evaluation metric.\n        \"\"\"\n        result_files, tmp_dir = self.format_results(results, img_metas,\n                                                    result_names,\n                                                    jsonfile_prefix)\n        if isinstance(result_files, dict):\n            for name in result_names:\n                print('Evaluating bboxes of {}'.format(name))\n                self._evaluate_single(result_files[name])\n        elif isinstance(result_files, str):\n            self._evaluate_single(result_files)\n\n        if tmp_dir is not None:\n            tmp_dir.cleanup()\n\n    def _format_bbox(self, results, img_metas, jsonfile_prefix=None):\n        \"\"\"Convert the results to the standard format.\n\n        Args:\n            results (list[dict]): Testing results of the dataset.\n            jsonfile_prefix (str): The prefix of the output jsonfile.\n                You can specify the output directory/filename by\n                modifying the jsonfile_prefix. 
Default: None.\n\n        Returns:\n            str: Path of the output json file.\n        \"\"\"\n        nusc_annos = {}\n        mapped_class_names = self.class_names\n\n        print('Start to convert detection format...')\n\n        for sample_id, det in enumerate(mmcv.track_iter_progress(results)):\n            boxes, scores, labels = det\n            sample_token = img_metas[sample_id]['token']\n            trans = np.array(img_metas[sample_id]['ego2global_translation'])\n            rot = Quaternion(img_metas[sample_id]['ego2global_rotation'])\n            annos = list()\n            for i, box in enumerate(boxes):\n                name = mapped_class_names[labels[i]]\n                center = box[:3]\n                wlh = box[[4, 3, 5]]\n                box_yaw = box[6]\n                box_vel = box[7:].tolist()\n                box_vel.append(0)\n                quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw)\n                nusc_box = Box(center, wlh, quat, velocity=box_vel)\n                nusc_box.rotate(rot)\n                nusc_box.translate(trans)\n                # assign the attribute from the estimated planar speed:\n                # above 0.2 m/s the object is treated as moving\n                if np.sqrt(nusc_box.velocity[0]**2 +\n                           nusc_box.velocity[1]**2) > 0.2:\n                    if name in [\n                            'car',\n                            'construction_vehicle',\n                            'bus',\n                            'truck',\n                            'trailer',\n                    ]:\n                        attr = 'vehicle.moving'\n                    elif name in ['bicycle', 'motorcycle']:\n                        attr = 'cycle.with_rider'\n                    else:\n                        attr = self.DefaultAttribute[name]\n                else:\n                    if name in ['pedestrian']:\n                        attr = 'pedestrian.standing'\n                    elif name in ['bus']:\n                        attr = 'vehicle.stopped'\n                    else:\n                        attr = self.DefaultAttribute[name]\n                nusc_anno = dict(\n                    sample_token=sample_token,\n                    translation=nusc_box.center.tolist(),\n                    size=nusc_box.wlh.tolist(),\n                    rotation=nusc_box.orientation.elements.tolist(),\n                    velocity=nusc_box.velocity[:2],\n                    detection_name=name,\n                    detection_score=float(scores[i]),\n                    attribute_name=attr,\n                )\n                annos.append(nusc_anno)\n            # results from other views of the same frame should be concatenated\n            if sample_token in nusc_annos:\n                nusc_annos[sample_token].extend(annos)\n            else:\n                nusc_annos[sample_token] = annos\n        nusc_submissions = {\n            'meta': self.modality,\n            'results': nusc_annos,\n        }\n        mmcv.mkdir_or_exist(jsonfile_prefix)\n        res_path = osp.join(jsonfile_prefix, 'results_nusc.json')\n        print('Results written to', res_path)\n        mmcv.dump(nusc_submissions, res_path)\n        return res_path\n"
  },
  {
    "path": "bevdepth/exps/base_cli.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport os\nfrom argparse import ArgumentParser\n\nimport pytorch_lightning as pl\n\nfrom bevdepth.callbacks.ema import EMACallback\nfrom bevdepth.utils.torch_dist import all_gather_object, synchronize\n\nfrom .nuscenes.base_exp import BEVDepthLightningModel\n\n\ndef run_cli(model_class=BEVDepthLightningModel,\n            exp_name='base_exp',\n            use_ema=False,\n            extra_trainer_config_args={}):\n    parent_parser = ArgumentParser(add_help=False)\n    parent_parser = pl.Trainer.add_argparse_args(parent_parser)\n    parent_parser.add_argument('-e',\n                               '--evaluate',\n                               dest='evaluate',\n                               action='store_true',\n                               help='evaluate model on validation set')\n    parent_parser.add_argument('-p',\n                               '--predict',\n                               dest='predict',\n                               action='store_true',\n                               help='predict model on testing set')\n    parent_parser.add_argument('-b', '--batch_size_per_device', type=int)\n    parent_parser.add_argument('--seed',\n                               type=int,\n                               default=0,\n                               help='seed for initializing training.')\n    parent_parser.add_argument('--ckpt_path', type=str)\n    parser = BEVDepthLightningModel.add_model_specific_args(parent_parser)\n    parser.set_defaults(profiler='simple',\n                        deterministic=False,\n                        max_epochs=extra_trainer_config_args.get('epochs', 24),\n                        accelerator='ddp',\n                        num_sanity_val_steps=0,\n                        gradient_clip_val=5,\n                        limit_val_batches=0,\n                        enable_checkpointing=True,\n                        precision=16,\n                        default_root_dir=os.path.join('./outputs/', exp_name))\n    args = parser.parse_args()\n    if args.seed is not None:\n        pl.seed_everything(args.seed)\n\n    model = model_class(**vars(args))\n    if use_ema:\n        train_dataloader = model.train_dataloader()\n        ema_callback = EMACallback(\n            len(train_dataloader.dataset) * args.max_epochs)\n        trainer = pl.Trainer.from_argparse_args(args, callbacks=[ema_callback])\n    else:\n        trainer = pl.Trainer.from_argparse_args(args)\n    if args.evaluate:\n        trainer.test(model, ckpt_path=args.ckpt_path)\n    elif args.predict:\n        predict_step_outputs = trainer.predict(model, ckpt_path=args.ckpt_path)\n        all_pred_results = list()\n        all_img_metas = list()\n        for predict_step_output in predict_step_outputs:\n            for i in range(len(predict_step_output)):\n                all_pred_results.append(predict_step_output[i][:3])\n                all_img_metas.append(predict_step_output[i][3])\n        synchronize()\n        len_dataset = len(model.test_dataloader().dataset)\n        all_pred_results = sum(\n            map(list, zip(*all_gather_object(all_pred_results))),\n            [])[:len_dataset]\n        all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))),\n                            [])[:len_dataset]\n        model.evaluator._format_bbox(all_pred_results, all_img_metas,\n                                     os.path.dirname(args.ckpt_path))\n    else:\n        trainer.fit(model)\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/MatrixVT/matrixvt_bev_depth_lss_r50_256x704_128x128_24e_ema.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\n# isort: skip_file\nfrom bevdepth.exps.base_cli import run_cli\n# Basic Experiment\nfrom bevdepth.exps.nuscenes.mv.bev_depth_lss_r50_256x704_128x128_24e_ema import \\\n    BEVDepthLightningModel as BaseExp # noqa\n# new model\nfrom bevdepth.models.matrixvt_det import MatrixVT_Det\n\n\nclass MatrixVT_Exp(BaseExp):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.model = MatrixVT_Det(self.backbone_conf,\n                                  self.head_conf,\n                                  is_train_depth=True)\n        self.data_use_cbgs = True\n\n\nif __name__ == '__main__':\n    run_cli(\n        MatrixVT_Exp,\n        'matrixvt_bev_depth_lss_r50_256x704_128x128_24e_ema_cbgs',\n        use_ema=True,\n    )\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/base_exp.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport os\nfrom functools import partial\n\nimport mmcv\nimport torch\nimport torch.nn.functional as F\nimport torch.nn.parallel\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torchvision.models as models\nfrom pytorch_lightning.core import LightningModule\nfrom torch.cuda.amp.autocast_mode import autocast\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom bevdepth.datasets.nusc_det_dataset import NuscDetDataset, collate_fn\nfrom bevdepth.evaluators.det_evaluators import DetNuscEvaluator\nfrom bevdepth.models.base_bev_depth import BaseBEVDepth\nfrom bevdepth.utils.torch_dist import all_gather_object, get_rank, synchronize\n\nH = 900\nW = 1600\nfinal_dim = (256, 704)\nimg_conf = dict(img_mean=[123.675, 116.28, 103.53],\n                img_std=[58.395, 57.12, 57.375],\n                to_rgb=True)\n\nbackbone_conf = {\n    'x_bound': [-51.2, 51.2, 0.8],\n    'y_bound': [-51.2, 51.2, 0.8],\n    'z_bound': [-5, 3, 8],\n    'd_bound': [2.0, 58.0, 0.5],\n    'final_dim':\n    final_dim,\n    'output_channels':\n    80,\n    'downsample_factor':\n    16,\n    'img_backbone_conf':\n    dict(\n        type='ResNet',\n        depth=50,\n        frozen_stages=0,\n        out_indices=[0, 1, 2, 3],\n        norm_eval=False,\n        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),\n    ),\n    'img_neck_conf':\n    dict(\n        type='SECONDFPN',\n        in_channels=[256, 512, 1024, 2048],\n        upsample_strides=[0.25, 0.5, 1, 2],\n        out_channels=[128, 128, 128, 128],\n    ),\n    'depth_net_conf':\n    dict(in_channels=512, mid_channels=512)\n}\nida_aug_conf = {\n    'resize_lim': (0.386, 0.55),\n    'final_dim':\n    final_dim,\n    'rot_lim': (-5.4, 5.4),\n    'H':\n    H,\n    'W':\n    W,\n    'rand_flip':\n    True,\n    'bot_pct_lim': (0.0, 0.0),\n    'cams': [\n        'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT',\n        'CAM_BACK', 'CAM_BACK_RIGHT'\n    ],\n    'Ncams':\n    6,\n}\n\nbda_aug_conf = {\n    'rot_lim': (-22.5, 22.5),\n    'scale_lim': (0.95, 1.05),\n    'flip_dx_ratio': 0.5,\n    'flip_dy_ratio': 0.5\n}\n\nbev_backbone = dict(\n    type='ResNet',\n    in_channels=80,\n    depth=18,\n    num_stages=3,\n    strides=(1, 2, 2),\n    dilations=(1, 1, 1),\n    out_indices=[0, 1, 2],\n    norm_eval=False,\n    base_channels=160,\n)\n\nbev_neck = dict(type='SECONDFPN',\n                in_channels=[80, 160, 320, 640],\n                upsample_strides=[1, 2, 4, 8],\n                out_channels=[64, 64, 64, 64])\n\nCLASSES = [\n    'car',\n    'truck',\n    'construction_vehicle',\n    'bus',\n    'trailer',\n    'barrier',\n    'motorcycle',\n    'bicycle',\n    'pedestrian',\n    'traffic_cone',\n]\n\nTASKS = [\n    dict(num_class=1, class_names=['car']),\n    dict(num_class=2, class_names=['truck', 'construction_vehicle']),\n    dict(num_class=2, class_names=['bus', 'trailer']),\n    dict(num_class=1, class_names=['barrier']),\n    dict(num_class=2, class_names=['motorcycle', 'bicycle']),\n    dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),\n]\n\ncommon_heads = dict(reg=(2, 2),\n                    height=(1, 2),\n                    dim=(3, 2),\n                    rot=(2, 2),\n                    vel=(2, 2))\n\nbbox_coder = dict(\n    type='CenterPointBBoxCoder',\n    post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],\n    max_num=500,\n    score_threshold=0.1,\n    out_size_factor=4,\n    voxel_size=[0.2, 
0.2, 8],\n    pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],\n    code_size=9,\n)\n\ntrain_cfg = dict(\n    point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],\n    grid_size=[512, 512, 1],\n    voxel_size=[0.2, 0.2, 8],\n    out_size_factor=4,\n    dense_reg=1,\n    gaussian_overlap=0.1,\n    max_objs=500,\n    min_radius=2,\n    code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5],\n)\n\ntest_cfg = dict(\n    post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],\n    max_per_img=500,\n    max_pool_nms=False,\n    min_radius=[4, 12, 10, 1, 0.85, 0.175],\n    score_threshold=0.1,\n    out_size_factor=4,\n    voxel_size=[0.2, 0.2, 8],\n    nms_type='circle',\n    pre_max_size=1000,\n    post_max_size=83,\n    nms_thr=0.2,\n)\n\nhead_conf = {\n    'bev_backbone_conf': bev_backbone,\n    'bev_neck_conf': bev_neck,\n    'tasks': TASKS,\n    'common_heads': common_heads,\n    'bbox_coder': bbox_coder,\n    'train_cfg': train_cfg,\n    'test_cfg': test_cfg,\n    'in_channels': 256,  # Equal to bev_neck output_channels.\n    'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'),\n    'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25),\n    'gaussian_overlap': 0.1,\n    'min_radius': 2,\n}\n\n\nclass BEVDepthLightningModel(LightningModule):\n    MODEL_NAMES = sorted(name for name in models.__dict__\n                         if name.islower() and not name.startswith('__')\n                         and callable(models.__dict__[name]))\n\n    def __init__(self,\n                 gpus: int = 1,\n                 data_root='data/nuScenes',\n                 eval_interval=1,\n                 batch_size_per_device=8,\n                 class_names=CLASSES,\n                 backbone_conf=backbone_conf,\n                 head_conf=head_conf,\n                 ida_aug_conf=ida_aug_conf,\n                 bda_aug_conf=bda_aug_conf,\n                 default_root_dir='./outputs/',\n                 **kwargs):\n        super().__init__()\n        self.save_hyperparameters()\n        self.gpus = gpus\n        self.eval_interval = eval_interval\n        self.batch_size_per_device = batch_size_per_device\n        self.data_root = data_root\n        self.basic_lr_per_img = 2e-4 / 64\n        self.class_names = class_names\n        self.backbone_conf = backbone_conf\n        self.head_conf = head_conf\n        self.ida_aug_conf = ida_aug_conf\n        self.bda_aug_conf = bda_aug_conf\n        mmcv.mkdir_or_exist(default_root_dir)\n        self.default_root_dir = default_root_dir\n        self.evaluator = DetNuscEvaluator(class_names=self.class_names,\n                                          output_dir=self.default_root_dir)\n        self.model = BaseBEVDepth(self.backbone_conf,\n                                  self.head_conf,\n                                  is_train_depth=True)\n        self.mode = 'valid'\n        self.img_conf = img_conf\n        self.data_use_cbgs = False\n        self.num_sweeps = 1\n        self.sweep_idxes = list()\n        self.key_idxes = list()\n        self.data_return_depth = True\n        self.downsample_factor = self.backbone_conf['downsample_factor']\n        self.dbound = self.backbone_conf['d_bound']\n        self.depth_channels = int(\n            (self.dbound[1] - self.dbound[0]) / self.dbound[2])\n        self.use_fusion = False\n        self.train_info_paths = os.path.join(self.data_root,\n                                             'nuscenes_infos_train.pkl')\n        self.val_info_paths = os.path.join(self.data_root,\n      
                                     'nuscenes_infos_val.pkl')\n        self.predict_info_paths = os.path.join(self.data_root,\n                                               'nuscenes_infos_test.pkl')\n\n    def forward(self, sweep_imgs, mats):\n        return self.model(sweep_imgs, mats)\n\n    def training_step(self, batch):\n        (sweep_imgs, mats, _, _, gt_boxes, gt_labels, depth_labels) = batch\n        if torch.cuda.is_available():\n            for key, value in mats.items():\n                mats[key] = value.cuda()\n            sweep_imgs = sweep_imgs.cuda()\n            gt_boxes = [gt_box.cuda() for gt_box in gt_boxes]\n            gt_labels = [gt_label.cuda() for gt_label in gt_labels]\n        preds, depth_preds = self(sweep_imgs, mats)\n        if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):\n            targets = self.model.module.get_targets(gt_boxes, gt_labels)\n            detection_loss = self.model.module.loss(targets, preds)\n        else:\n            targets = self.model.get_targets(gt_boxes, gt_labels)\n            detection_loss = self.model.loss(targets, preds)\n\n        if len(depth_labels.shape) == 5:\n            # only key-frame will calculate depth loss\n            depth_labels = depth_labels[:, 0, ...]\n        depth_loss = self.get_depth_loss(depth_labels.cuda(), depth_preds)\n        self.log('detection_loss', detection_loss)\n        self.log('depth_loss', depth_loss)\n        return detection_loss + depth_loss\n\n    def get_depth_loss(self, depth_labels, depth_preds):\n        depth_labels = self.get_downsampled_gt_depth(depth_labels)\n        depth_preds = depth_preds.permute(0, 2, 3, 1).contiguous().view(\n            -1, self.depth_channels)\n        fg_mask = torch.max(depth_labels, dim=1).values > 0.0\n\n        with autocast(enabled=False):\n            depth_loss = (F.binary_cross_entropy(\n                depth_preds[fg_mask],\n                depth_labels[fg_mask],\n                reduction='none',\n            ).sum() / max(1.0, fg_mask.sum()))\n\n        return 3.0 * depth_loss\n\n    def get_downsampled_gt_depth(self, gt_depths):\n        \"\"\"\n        Input:\n            gt_depths: [B, N, H, W]\n        Output:\n            gt_depths: [B*N*h*w, d]\n        \"\"\"\n        B, N, H, W = gt_depths.shape\n        gt_depths = gt_depths.view(\n            B * N,\n            H // self.downsample_factor,\n            self.downsample_factor,\n            W // self.downsample_factor,\n            self.downsample_factor,\n            1,\n        )\n        gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous()\n        gt_depths = gt_depths.view(\n            -1, self.downsample_factor * self.downsample_factor)\n        gt_depths_tmp = torch.where(gt_depths == 0.0,\n                                    1e5 * torch.ones_like(gt_depths),\n                                    gt_depths)\n        gt_depths = torch.min(gt_depths_tmp, dim=-1).values\n        gt_depths = gt_depths.view(B * N, H // self.downsample_factor,\n                                   W // self.downsample_factor)\n\n        gt_depths = (gt_depths -\n                     (self.dbound[0] - self.dbound[2])) / self.dbound[2]\n        gt_depths = torch.where(\n            (gt_depths < self.depth_channels + 1) & (gt_depths >= 0.0),\n            gt_depths, torch.zeros_like(gt_depths))\n        gt_depths = F.one_hot(gt_depths.long(),\n                              num_classes=self.depth_channels + 1).view(\n                                  -1, 
self.depth_channels + 1)[:, 1:]\n\n        return gt_depths.float()\n\n    def eval_step(self, batch, batch_idx, prefix: str):\n        (sweep_imgs, mats, _, img_metas, _, _) = batch\n        if torch.cuda.is_available():\n            for key, value in mats.items():\n                mats[key] = value.cuda()\n            sweep_imgs = sweep_imgs.cuda()\n        preds = self.model(sweep_imgs, mats)\n        if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):\n            results = self.model.module.get_bboxes(preds, img_metas)\n        else:\n            results = self.model.get_bboxes(preds, img_metas)\n        for i in range(len(results)):\n            results[i][0] = results[i][0].detach().cpu().numpy()\n            results[i][1] = results[i][1].detach().cpu().numpy()\n            results[i][2] = results[i][2].detach().cpu().numpy()\n            results[i].append(img_metas[i])\n        return results\n\n    def validation_step(self, batch, batch_idx):\n        return self.eval_step(batch, batch_idx, 'val')\n\n    def validation_epoch_end(self, validation_step_outputs):\n        all_pred_results = list()\n        all_img_metas = list()\n        for validation_step_output in validation_step_outputs:\n            for i in range(len(validation_step_output)):\n                all_pred_results.append(validation_step_output[i][:3])\n                all_img_metas.append(validation_step_output[i][3])\n        synchronize()\n        len_dataset = len(self.val_dataloader().dataset)\n        all_pred_results = sum(\n            map(list, zip(*all_gather_object(all_pred_results))),\n            [])[:len_dataset]\n        all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))),\n                            [])[:len_dataset]\n        if get_rank() == 0:\n            self.evaluator.evaluate(all_pred_results, all_img_metas)\n\n    def test_epoch_end(self, test_step_outputs):\n        all_pred_results = list()\n        all_img_metas = list()\n        for test_step_output in test_step_outputs:\n            for i in range(len(test_step_output)):\n                all_pred_results.append(test_step_output[i][:3])\n                all_img_metas.append(test_step_output[i][3])\n        synchronize()\n        # TODO: Change another way.\n        dataset_length = len(self.val_dataloader().dataset)\n        all_pred_results = sum(\n            map(list, zip(*all_gather_object(all_pred_results))),\n            [])[:dataset_length]\n        all_img_metas = sum(map(list, zip(*all_gather_object(all_img_metas))),\n                            [])[:dataset_length]\n        if get_rank() == 0:\n            self.evaluator.evaluate(all_pred_results, all_img_metas)\n\n    def configure_optimizers(self):\n        lr = self.basic_lr_per_img * \\\n            self.batch_size_per_device * self.gpus\n        optimizer = torch.optim.AdamW(self.model.parameters(),\n                                      lr=lr,\n                                      weight_decay=1e-7)\n        scheduler = MultiStepLR(optimizer, [19, 23])\n        return [[optimizer], [scheduler]]\n\n    def train_dataloader(self):\n        train_dataset = NuscDetDataset(ida_aug_conf=self.ida_aug_conf,\n                                       bda_aug_conf=self.bda_aug_conf,\n                                       classes=self.class_names,\n                                       data_root=self.data_root,\n                                       info_paths=self.train_info_paths,\n                                       is_train=True,\n           
                            use_cbgs=self.data_use_cbgs,\n                                       img_conf=self.img_conf,\n                                       num_sweeps=self.num_sweeps,\n                                       sweep_idxes=self.sweep_idxes,\n                                       key_idxes=self.key_idxes,\n                                       return_depth=self.data_return_depth,\n                                       use_fusion=self.use_fusion)\n\n        train_loader = torch.utils.data.DataLoader(\n            train_dataset,\n            batch_size=self.batch_size_per_device,\n            num_workers=4,\n            drop_last=True,\n            shuffle=False,\n            collate_fn=partial(collate_fn,\n                               is_return_depth=self.data_return_depth\n                               or self.use_fusion),\n            sampler=None,\n        )\n        return train_loader\n\n    def val_dataloader(self):\n        val_dataset = NuscDetDataset(ida_aug_conf=self.ida_aug_conf,\n                                     bda_aug_conf=self.bda_aug_conf,\n                                     classes=self.class_names,\n                                     data_root=self.data_root,\n                                     info_paths=self.val_info_paths,\n                                     is_train=False,\n                                     img_conf=self.img_conf,\n                                     num_sweeps=self.num_sweeps,\n                                     sweep_idxes=self.sweep_idxes,\n                                     key_idxes=self.key_idxes,\n                                     return_depth=self.use_fusion,\n                                     use_fusion=self.use_fusion)\n        val_loader = torch.utils.data.DataLoader(\n            val_dataset,\n            batch_size=self.batch_size_per_device,\n            shuffle=False,\n            collate_fn=partial(collate_fn, is_return_depth=self.use_fusion),\n            num_workers=4,\n            sampler=None,\n        )\n        return val_loader\n\n    def test_dataloader(self):\n        return self.val_dataloader()\n\n    def predict_dataloader(self):\n        predict_dataset = NuscDetDataset(ida_aug_conf=self.ida_aug_conf,\n                                         bda_aug_conf=self.bda_aug_conf,\n                                         classes=self.class_names,\n                                         data_root=self.data_root,\n                                         info_paths=self.predict_info_paths,\n                                         is_train=False,\n                                         img_conf=self.img_conf,\n                                         num_sweeps=self.num_sweeps,\n                                         sweep_idxes=self.sweep_idxes,\n                                         key_idxes=self.key_idxes,\n                                         return_depth=self.use_fusion,\n                                         use_fusion=self.use_fusion)\n        predict_loader = torch.utils.data.DataLoader(\n            predict_dataset,\n            batch_size=self.batch_size_per_device,\n            shuffle=False,\n            collate_fn=partial(collate_fn, is_return_depth=self.use_fusion),\n            num_workers=4,\n            sampler=None,\n        )\n        return predict_loader\n\n    def test_step(self, batch, batch_idx):\n        return self.eval_step(batch, batch_idx, 'test')\n\n    def predict_step(self, batch, batch_idx):\n        return self.eval_step(batch, 
batch_idx, 'predict')\n\n    @staticmethod\n    def add_model_specific_args(parent_parser):  # pragma: no cover\n        return parent_parser\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/fusion/bev_depth_fusion_lss_r50_256x704_128x128_24e.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport torch\nimport torch.nn.parallel\nimport torch.utils.data\nimport torch.utils.data.distributed\n\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.base_exp import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel\nfrom bevdepth.models.fusion_bev_depth import FusionBEVDepth\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, *args, **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n        self.model = FusionBEVDepth(self.backbone_conf,\n                                    self.head_conf,\n                                    is_train_depth=False)\n        self.use_fusion = True\n\n    def forward(self, sweep_imgs, mats, lidar_depth):\n        return self.model(sweep_imgs, mats, lidar_depth)\n\n    def training_step(self, batch):\n        (sweep_imgs, mats, _, _, gt_boxes, gt_labels, lidar_depth) = batch\n        if torch.cuda.is_available():\n            for key, value in mats.items():\n                mats[key] = value.cuda()\n            sweep_imgs = sweep_imgs.cuda()\n            gt_boxes = [gt_box.cuda() for gt_box in gt_boxes]\n            gt_labels = [gt_label.cuda() for gt_label in gt_labels]\n        preds = self(sweep_imgs, mats, lidar_depth)\n        if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):\n            targets = self.model.module.get_targets(gt_boxes, gt_labels)\n            detection_loss = self.model.module.loss(targets, preds)\n        else:\n            targets = self.model.get_targets(gt_boxes, gt_labels)\n            detection_loss = self.model.loss(targets, preds)\n\n        if len(lidar_depth.shape) == 5:\n            # only key-frame will calculate depth loss\n            lidar_depth = lidar_depth[:, 0, ...]\n        self.log('detection_loss', detection_loss)\n        return detection_loss\n\n    def eval_step(self, batch, batch_idx, prefix: str):\n        (sweep_imgs, mats, _, img_metas, _, _, lidar_depth) = batch\n        if torch.cuda.is_available():\n            for key, value in mats.items():\n                mats[key] = value.cuda()\n            sweep_imgs = sweep_imgs.cuda()\n        preds = self.model(sweep_imgs, mats, lidar_depth)\n        if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):\n            results = self.model.module.get_bboxes(preds, img_metas)\n        else:\n            results = self.model.get_bboxes(preds, img_metas)\n        for i in range(len(results)):\n            results[i][0] = results[i][0].detach().cpu().numpy()\n            results[i][1] = results[i][1].detach().cpu().numpy()\n            results[i][2] = results[i][2].detach().cpu().numpy()\n            results[i].append(img_metas[i])\n        return results\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_depth_fusion_lss_r50_256x704_128x128_24e')\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/fusion/bev_depth_fusion_lss_r50_256x704_128x128_24e_2key.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.fusion.bev_depth_fusion_lss_r50_256x704_128x128_24e import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel  # noqa\nfrom bevdepth.models.fusion_bev_depth import FusionBEVDepth\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.key_idxes = [-1]\n        self.head_conf['bev_backbone_conf']['in_channels'] = 80 * (\n            len(self.key_idxes) + 1)\n        self.head_conf['bev_neck_conf']['in_channels'] = [\n            80 * (len(self.key_idxes) + 1), 160, 320, 640\n        ]\n        self.head_conf['train_cfg']['code_weight'] = [\n            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0\n        ]\n        self.model = FusionBEVDepth(self.backbone_conf, self.head_conf)\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_depth_fusion_lss_r50_256x704_128x128_24e_2key')\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/fusion/bev_depth_fusion_lss_r50_256x704_128x128_24e_2key_trainval.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nfrom bevdepth.exps.base_cli import run_cli\n\nfrom .bev_depth_fusion_lss_r50_256x704_128x128_24e_2key import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, *args, **kwargs) -> None:\n        super().__init__(*args, **kwargs)\n        self.train_info_paths = [\n            'data/nuScenes/nuscenes_infos_train.pkl',\n            'data/nuScenes/nuscenes_infos_val.pkl'\n        ]\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_depth_fusion_lss_r50_256x704_128x128_24e_2key_trainval')\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/fusion/bev_depth_fusion_lss_r50_256x704_128x128_24e_key4.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.fusion.bev_depth_fusion_lss_r50_256x704_128x128_24e import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel  # noqa\nfrom bevdepth.models.fusion_bev_depth import FusionBEVDepth\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.sweep_idxes = [4]\n        self.head_conf['bev_backbone_conf']['in_channels'] = 80 * (\n            len(self.sweep_idxes) + 1)\n        self.head_conf['bev_neck_conf']['in_channels'] = [\n            80 * (len(self.sweep_idxes) + 1), 160, 320, 640\n        ]\n        self.head_conf['train_cfg']['code_weight'] = [\n            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0\n        ]\n        self.model = FusionBEVDepth(self.backbone_conf,\n                                    self.head_conf,\n                                    is_train_depth=False)\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_depth_fusion_lss_r50_256x704_128x128_24e_key4')\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\n\"\"\"\nmAP: 0.3484\nmATE: 0.6159\nmASE: 0.2716\nmAOE: 0.4144\nmAVE: 0.4402\nmAAE: 0.1954\nNDS: 0.4805\nEval time: 110.7s\nPer-class results:\nObject Class    AP      ATE     ASE     AOE     AVE     AAE\ncar     0.553   0.480   0.157   0.117   0.386   0.205\ntruck   0.252   0.645   0.202   0.097   0.381   0.185\nbus     0.378   0.674   0.197   0.090   0.871   0.298\ntrailer 0.163   0.932   0.230   0.409   0.543   0.098\nconstruction_vehicle    0.076   0.878   0.495   1.015   0.103   0.344\npedestrian      0.361   0.694   0.300   0.816   0.491   0.247\nmotorcycle      0.319   0.569   0.252   0.431   0.552   0.181\nbicycle 0.286   0.457   0.255   0.630   0.194   0.006\ntraffic_cone    0.536   0.438   0.339   nan     nan     nan\nbarrier 0.559   0.392   0.289   0.124   nan     nan\n\"\"\"\nimport torch\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.mv.bev_depth_lss_r50_256x704_128x128_24e_2key import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel  # noqa\nfrom bevdepth.models.base_bev_depth import BaseBEVDepth as BaseBEVDepth\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.backbone_conf['use_da'] = True\n        self.data_use_cbgs = True\n        self.model = BaseBEVDepth(self.backbone_conf,\n                                  self.head_conf,\n                                  is_train_depth=True)\n\n    def configure_optimizers(self):\n        lr = self.basic_lr_per_img * \\\n            self.batch_size_per_device * self.gpus\n        optimizer = torch.optim.AdamW(self.model.parameters(),\n                                      lr=lr,\n                                      weight_decay=1e-7)\n        scheduler = MultiStepLR(optimizer, [16, 19])\n        return [[optimizer], [scheduler]]\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da',\n            extra_trainer_config_args={'epochs': 20})\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da_ema.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\n\"\"\"\nmAP: 0.3589\nmATE: 0.6119\nmASE: 0.2692\nmAOE: 0.5074\nmAVE: 0.4086\nmAAE: 0.2009\nNDS: 0.4797\nEval time: 183.3s\nPer-class results:\nObject Class    AP      ATE     ASE     AOE     AVE     AAE\ncar     0.559   0.475   0.157   0.112   0.370   0.205\ntruck   0.270   0.659   0.196   0.103   0.356   0.181\nbus     0.374   0.651   0.184   0.072   0.846   0.326\ntrailer 0.179   0.963   0.227   0.512   0.294   0.127\nconstruction_vehicle    0.081   0.825   0.481   1.352   0.094   0.345\npedestrian      0.363   0.690   0.297   0.831   0.491   0.244\nmotorcycle      0.354   0.580   0.255   0.545   0.615   0.164\nbicycle 0.301   0.447   0.280   0.920   0.203   0.015\ntraffic_cone    0.539   0.435   0.324   nan     nan     nan\nbarrier 0.569   0.394   0.293   0.120   nan     nan\n\"\"\"\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.mv.bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da import \\\n    BEVDepthLightningModel  # noqa\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_depth_lss_r50_256x704_128x128_20e_cbgs_2key_da_ema',\n            use_ema=True,\n            extra_trainer_config_args={'epochs': 20})\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_depth_lss_r50_256x704_128x128_24e_2key.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\n\"\"\"\nmAP: 0.3304\nmATE: 0.7021\nmASE: 0.2795\nmAOE: 0.5346\nmAVE: 0.5530\nmAAE: 0.2274\nNDS: 0.4355\nEval time: 171.8s\n\nPer-class results:\nObject Class    AP      ATE     ASE     AOE     AVE     AAE\ncar     0.499   0.540   0.165   0.211   0.650   0.233\ntruck   0.278   0.719   0.218   0.265   0.547   0.215\nbus     0.386   0.661   0.211   0.171   1.132   0.274\ntrailer 0.168   1.034   0.235   0.548   0.408   0.168\nconstruction_vehicle    0.075   1.124   0.510   1.177   0.111   0.385\npedestrian      0.284   0.757   0.298   0.966   0.578   0.301\nmotorcycle      0.335   0.624   0.263   0.621   0.734   0.237\nbicycle 0.305   0.554   0.264   0.653   0.263   0.006\ntraffic_cone    0.462   0.516   0.355   nan     nan     nan\nbarrier 0.512   0.491   0.275   0.200   nan     nan\n\"\"\"\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.base_exp import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel\nfrom bevdepth.models.base_bev_depth import BaseBEVDepth\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.key_idxes = [-1]\n        self.head_conf['bev_backbone_conf']['in_channels'] = 80 * (\n            len(self.key_idxes) + 1)\n        self.head_conf['bev_neck_conf']['in_channels'] = [\n            80 * (len(self.key_idxes) + 1), 160, 320, 640\n        ]\n        self.head_conf['train_cfg']['code_weights'] = [\n            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0\n        ]\n        self.model = BaseBEVDepth(self.backbone_conf,\n                                  self.head_conf,\n                                  is_train_depth=True)\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_depth_lss_r50_256x704_128x128_24e_2key')\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_depth_lss_r50_256x704_128x128_24e_2key_ema.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\n\"\"\"\nmAP: 0.3329\nmATE: 0.6832\nmASE: 0.2761\nmAOE: 0.5446\nmAVE: 0.5258\nmAAE: 0.2259\nNDS: 0.4409\n\nPer-class results:\nObject Class    AP      ATE     ASE     AOE     AVE     AAE\ncar     0.505   0.531   0.165   0.189   0.618   0.234\ntruck   0.274   0.731   0.206   0.211   0.546   0.223\nbus     0.394   0.673   0.219   0.148   1.061   0.274\ntrailer 0.174   0.934   0.228   0.544   0.369   0.183\nconstruction_vehicle    0.079   1.043   0.528   1.162   0.112   0.376\npedestrian      0.284   0.748   0.294   0.973   0.575   0.297\nmotorcycle      0.345   0.633   0.256   0.719   0.667   0.214\nbicycle 0.314   0.544   0.252   0.778   0.259   0.007\ntraffic_cone    0.453   0.519   0.335   nan     nan     nan\nbarrier 0.506   0.475   0.279   0.178   nan     nan\n\"\"\"\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.mv.bev_depth_lss_r50_256x704_128x128_24e_2key import \\\n    BEVDepthLightningModel  # noqa\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_depth_lss_r50_256x704_128x128_24e_2key_ema',\n            use_ema=True)\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_depth_lss_r50_256x704_128x128_24e_ema.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport torch\nimport torch.nn.parallel\nimport torch.utils.data\nimport torch.utils.data.distributed\n\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.base_exp import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def configure_optimizers(self):\n        lr = self.basic_lr_per_img * \\\n            self.batch_size_per_device * self.gpus\n        optimizer = torch.optim.AdamW(self.model.parameters(),\n                                      lr=lr,\n                                      weight_decay=1e-7)\n        return [optimizer]\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_depth_lss_r50_256x704_128x128_24e_ema',\n            use_ema=True)\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_depth_lss_r50_512x1408_128x128_24e_2key.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport torch\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.mv.bev_depth_lss_r50_256x704_128x128_24e_2key import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel  # noqa\nfrom bevdepth.models.base_bev_depth import BaseBEVDepth\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        final_dim = (512, 1408)\n        self.backbone_conf['final_dim'] = final_dim\n        self.ida_aug_conf['resize_lim'] = (0.386 * 2, 0.55 * 2)\n        self.ida_aug_conf['final_dim'] = final_dim\n        self.model = BaseBEVDepth(self.backbone_conf,\n                                  self.head_conf,\n                                  is_train_depth=True)\n\n    def configure_optimizers(self):\n        lr = self.basic_lr_per_img * \\\n            self.batch_size_per_device * self.gpus\n        optimizer = torch.optim.AdamW(self.model.parameters(),\n                                      lr=lr,\n                                      weight_decay=1e-3)\n        scheduler = MultiStepLR(optimizer, [19, 23])\n        return [[optimizer], [scheduler]]\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_depth_lss_r50_512x1408_128x128_24e_2key')\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_depth_lss_r50_640x1600_128x128_24e_2key.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport torch\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.mv.bev_depth_lss_r50_256x704_128x128_24e_2key import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel  # noqa\nfrom bevdepth.models.base_bev_depth import BaseBEVDepth\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        final_dim = (640, 1600)\n        self.backbone_conf['final_dim'] = final_dim\n        self.ida_aug_conf['resize_lim'] = (0.386 * 2, 0.55 * 2)\n        self.ida_aug_conf['final_dim'] = final_dim\n        self.model = BaseBEVDepth(self.backbone_conf,\n                                  self.head_conf,\n                                  is_train_depth=True)\n\n    def configure_optimizers(self):\n        lr = self.basic_lr_per_img * \\\n            self.batch_size_per_device * self.gpus\n        optimizer = torch.optim.AdamW(self.model.parameters(),\n                                      lr=lr,\n                                      weight_decay=1e-3)\n        scheduler = MultiStepLR(optimizer, [19, 23])\n        return [[optimizer], [scheduler]]\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_depth_lss_r50_512x1408_128x128_24e_2key')\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_20e_cbgs_2key_da.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\n\"\"\"\nmAP: 0.3576\nmATE: 0.6071\nmASE: 0.2684\nmAOE: 0.4157\nmAVE: 0.3928\nmAAE: 0.2021\nNDS: 0.4902\nEval time: 129.7s\nPer-class results:\nObject Class    AP      ATE     ASE     AOE     AVE     AAE\ncar     0.559   0.465   0.157   0.110   0.350   0.205\ntruck   0.285   0.633   0.205   0.101   0.304   0.209\nbus     0.373   0.667   0.204   0.076   0.896   0.345\ntrailer 0.167   0.956   0.228   0.482   0.289   0.100\nconstruction_vehicle    0.077   0.869   0.454   1.024   0.108   0.335\npedestrian      0.402   0.652   0.299   0.821   0.493   0.253\nmotorcycle      0.321   0.544   0.255   0.484   0.529   0.159\nbicycle 0.276   0.466   0.272   0.522   0.173   0.011\ntraffic_cone    0.551   0.432   0.321   nan     nan     nan\nbarrier 0.565   0.386   0.287   0.121   nan     nan\n\"\"\"\nimport torch\nfrom torch.optim.lr_scheduler import MultiStepLR\n\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.mv.bev_stereo_lss_r50_256x704_128x128_24e_2key import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel  # noqa\nfrom bevdepth.models.bev_stereo import BEVStereo\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.backbone_conf['use_da'] = True\n        self.data_use_cbgs = True\n        self.basic_lr_per_img = 2e-4 / 32\n        self.model = BEVStereo(self.backbone_conf,\n                               self.head_conf,\n                               is_train_depth=True)\n\n    def configure_optimizers(self):\n        lr = self.basic_lr_per_img * \\\n            self.batch_size_per_device * self.gpus\n        optimizer = torch.optim.AdamW(self.model.parameters(),\n                                      lr=lr,\n                                      weight_decay=1e-2)\n        scheduler = MultiStepLR(optimizer, [16, 19])\n        return [[optimizer], [scheduler]]\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_stereo_lss_r50_256x704_128x128_20e_cbgs_2key_da',\n            extra_trainer_config_args={'epochs': 20})\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_20e_cbgs_2key_da_ema.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\n\"\"\"\nmAP: 0.3721\nmATE: 0.5980\nmASE: 0.2701\nmAOE: 0.4381\nmAVE: 0.3672\nmAAE: 0.1898\nNDS: 0.4997\nEval time: 138.0s\nPer-class results:\nObject Class    AP      ATE     ASE     AOE     AVE     AAE\ncar     0.567   0.457   0.156   0.104   0.343   0.204\ntruck   0.299   0.650   0.205   0.103   0.321   0.197\nbus     0.394   0.613   0.203   0.106   0.643   0.252\ntrailer 0.178   0.991   0.239   0.433   0.345   0.070\nconstruction_vehicle    0.102   0.826   0.458   1.055   0.114   0.372\npedestrian      0.402   0.653   0.297   0.803   0.479   0.249\nmotorcycle      0.356   0.553   0.251   0.450   0.512   0.168\nbicycle 0.311   0.440   0.265   0.779   0.180   0.006\ntraffic_cone    0.552   0.420   0.336   nan     nan     nan\nbarrier 0.561   0.377   0.291   0.111   nan     nan\n\"\"\"\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.mv.bev_stereo_lss_r50_256x704_128x128_20e_cbgs_2key_da import \\\n    BEVDepthLightningModel  # noqa\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_stereo_lss_r50_256x704_128x128_20e_cbgs_2key_da_ema',\n            use_ema=True,\n            extra_trainer_config_args={'epochs': 20})\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_24e_2key.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\n\"\"\"\nmAP: 0.3456\nmATE: 0.6589\nmASE: 0.2774\nmAOE: 0.5500\nmAVE: 0.4980\nmAAE: 0.2278\nNDS: 0.4516\nEval time: 158.2s\nPer-class results:\nObject Class    AP      ATE     ASE     AOE     AVE     AAE\ncar     0.510   0.525   0.165   0.188   0.510   0.226\ntruck   0.288   0.698   0.220   0.205   0.443   0.227\nbus     0.378   0.622   0.210   0.135   0.896   0.289\ntrailer 0.156   1.003   0.219   0.482   0.609   0.179\nconstruction_vehicle    0.094   0.929   0.502   1.209   0.108   0.365\npedestrian      0.356   0.728   0.297   1.005   0.579   0.319\nmotorcycle      0.361   0.571   0.258   0.734   0.631   0.211\nbicycle 0.318   0.533   0.269   0.793   0.208   0.007\ntraffic_cone    0.488   0.501   0.355   nan     nan     nan\nbarrier 0.506   0.478   0.277   0.200   nan     nan\n\"\"\"\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.base_exp import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel\nfrom bevdepth.models.bev_stereo import BEVStereo\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.key_idxes = [-1]\n        self.head_conf['bev_backbone_conf']['in_channels'] = 80 * (\n            len(self.key_idxes) + 1)\n        self.head_conf['bev_neck_conf']['in_channels'] = [\n            80 * (len(self.key_idxes) + 1), 160, 320, 640\n        ]\n        self.head_conf['train_cfg']['code_weights'] = [\n            1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0\n        ]\n        self.head_conf['test_cfg']['thresh_scale'] = [\n            0.6, 0.4, 0.4, 0.7, 0.8, 0.9\n        ]\n        self.head_conf['test_cfg']['nms_type'] = 'size_aware_circle'\n        self.model = BEVStereo(self.backbone_conf,\n                               self.head_conf,\n                               is_train_depth=True)\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_stereo_lss_r50_256x704_128x128_24e_2key')\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_24e_2key_ema.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\n\"\"\"\nmAP: 0.3494\nmATE: 0.6672\nmASE: 0.2785\nmAOE: 0.5607\nmAVE: 0.4687\nmAAE: 0.2295\nNDS: 0.4542\nEval time: 166.7s\nPer-class results:\nObject Class    AP      ATE     ASE     AOE     AVE     AAE\ncar     0.509   0.522   0.163   0.187   0.507   0.228\ntruck   0.287   0.694   0.213   0.202   0.449   0.229\nbus     0.390   0.681   0.207   0.152   0.902   0.261\ntrailer 0.167   0.945   0.248   0.491   0.340   0.185\nconstruction_vehicle    0.087   1.057   0.515   1.199   0.104   0.377\npedestrian      0.351   0.729   0.299   0.987   0.575   0.321\nmotorcycle      0.368   0.581   0.262   0.721   0.663   0.226\nbicycle 0.338   0.494   0.258   0.921   0.209   0.008\ntraffic_cone    0.494   0.502   0.341   nan     nan     nan\nbarrier 0.502   0.467   0.278   0.185   nan     nan\n\"\"\"\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.mv.bev_stereo_lss_r50_256x704_128x128_24e_2key import \\\n    BEVDepthLightningModel  # noqa\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_stereo_lss_r50_256x704_128x128_24e_2key_ema',\n            use_ema=True)\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_24e_key4.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\n\"\"\"\nmAP: 0.3427\nmATE: 0.6560\nmASE: 0.2784\nmAOE: 0.5982\nmAVE: 0.5347\nmAAE: 0.2228\nNDS: 0.4423\nEval time: 116.3s\nPer-class results:\nObject Class    AP      ATE     ASE     AOE     AVE     AAE\ncar     0.508   0.518   0.163   0.188   0.534   0.230\ntruck   0.268   0.709   0.214   0.215   0.510   0.226\nbus     0.379   0.640   0.207   0.142   1.049   0.315\ntrailer 0.151   0.953   0.240   0.541   0.618   0.113\nconstruction_vehicle    0.092   0.955   0.514   1.360   0.113   0.394\npedestrian      0.350   0.727   0.300   1.013   0.598   0.328\nmotorcycle      0.371   0.576   0.259   0.777   0.634   0.175\nbicycle 0.325   0.512   0.261   0.942   0.221   0.002\ntraffic_cone    0.489   0.503   0.345   nan     nan     nan\nbarrier 0.495   0.468   0.280   0.206   nan     nan\n\"\"\"\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.mv.bev_stereo_lss_r50_256x704_128x128_24e_2key import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel  # noqa\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.num_sweeps = 2\n        self.sweep_idxes = [4]\n        self.key_idxes = list()\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_stereo_lss_r50_256x704_128x128_24e_key4')\n"
  },
  {
    "path": "bevdepth/exps/nuscenes/mv/bev_stereo_lss_r50_256x704_128x128_24e_key4_ema.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\n\"\"\"\nmAP: 0.3427\nmATE: 0.6560\nmASE: 0.2784\nmAOE: 0.5982\nmAVE: 0.5347\nmAAE: 0.2228\nNDS: 0.4423\nEval time: 116.3s\nPer-class results:\nObject Class    AP      ATE     ASE     AOE     AVE     AAE\ncar     0.508   0.518   0.163   0.188   0.534   0.230\ntruck   0.268   0.709   0.214   0.215   0.510   0.226\nbus     0.379   0.640   0.207   0.142   1.049   0.315\ntrailer 0.151   0.953   0.240   0.541   0.618   0.113\nconstruction_vehicle    0.092   0.955   0.514   1.360   0.113   0.394\npedestrian      0.350   0.727   0.300   1.013   0.598   0.328\nmotorcycle      0.371   0.576   0.259   0.777   0.634   0.175\nbicycle 0.325   0.512   0.261   0.942   0.221   0.002\ntraffic_cone    0.489   0.503   0.345   nan     nan     nan\nbarrier 0.495   0.468   0.280   0.206   nan     nan\n\"\"\"\nfrom bevdepth.exps.base_cli import run_cli\nfrom bevdepth.exps.nuscenes.mv.bev_stereo_lss_r50_256x704_128x128_24e_key4 import \\\n    BEVDepthLightningModel as BaseBEVDepthLightningModel  # noqa\n\n\nclass BEVDepthLightningModel(BaseBEVDepthLightningModel):\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.num_sweeps = 2\n        self.sweep_idxes = [4]\n        self.key_idxes = list()\n\n\nif __name__ == '__main__':\n    run_cli(BEVDepthLightningModel,\n            'bev_stereo_lss_r50_256x704_128x128_24e_key4_ema')\n"
  },
  {
    "path": "bevdepth/layers/__init__.py",
    "content": "from .heads.bev_depth_head import BEVDepthHead\n\n__all__ = ['BEVDepthHead']\n"
  },
  {
    "path": "bevdepth/layers/backbones/__init__.py",
    "content": "from .base_lss_fpn import BaseLSSFPN\nfrom .fusion_lss_fpn import FusionLSSFPN\n\n__all__ = ['BaseLSSFPN', 'FusionLSSFPN']\n"
  },
  {
    "path": "bevdepth/layers/backbones/base_lss_fpn.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.cnn import build_conv_layer\nfrom mmdet3d.models import build_neck\nfrom mmdet.models import build_backbone\nfrom mmdet.models.backbones.resnet import BasicBlock\nfrom torch import nn\nfrom torch.cuda.amp.autocast_mode import autocast\n\ntry:\n    from bevdepth.ops.voxel_pooling_inference import voxel_pooling_inference\n    from bevdepth.ops.voxel_pooling_train import voxel_pooling_train\nexcept ImportError:\n    print('Import VoxelPooling fail.')\n\n__all__ = ['BaseLSSFPN']\n\n\nclass _ASPPModule(nn.Module):\n\n    def __init__(self, inplanes, planes, kernel_size, padding, dilation,\n                 BatchNorm):\n        super(_ASPPModule, self).__init__()\n        self.atrous_conv = nn.Conv2d(inplanes,\n                                     planes,\n                                     kernel_size=kernel_size,\n                                     stride=1,\n                                     padding=padding,\n                                     dilation=dilation,\n                                     bias=False)\n        self.bn = BatchNorm(planes)\n        self.relu = nn.ReLU()\n\n        self._init_weight()\n\n    def forward(self, x):\n        x = self.atrous_conv(x)\n        x = self.bn(x)\n\n        return self.relu(x)\n\n    def _init_weight(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                torch.nn.init.kaiming_normal_(m.weight)\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n\nclass ASPP(nn.Module):\n\n    def __init__(self, inplanes, mid_channels=256, BatchNorm=nn.BatchNorm2d):\n        super(ASPP, self).__init__()\n\n        dilations = [1, 6, 12, 18]\n\n        self.aspp1 = _ASPPModule(inplanes,\n                                 mid_channels,\n                                 1,\n                                 padding=0,\n                                 dilation=dilations[0],\n                                 BatchNorm=BatchNorm)\n        self.aspp2 = _ASPPModule(inplanes,\n                                 mid_channels,\n                                 3,\n                                 padding=dilations[1],\n                                 dilation=dilations[1],\n                                 BatchNorm=BatchNorm)\n        self.aspp3 = _ASPPModule(inplanes,\n                                 mid_channels,\n                                 3,\n                                 padding=dilations[2],\n                                 dilation=dilations[2],\n                                 BatchNorm=BatchNorm)\n        self.aspp4 = _ASPPModule(inplanes,\n                                 mid_channels,\n                                 3,\n                                 padding=dilations[3],\n                                 dilation=dilations[3],\n                                 BatchNorm=BatchNorm)\n\n        self.global_avg_pool = nn.Sequential(\n            nn.AdaptiveAvgPool2d((1, 1)),\n            nn.Conv2d(inplanes, mid_channels, 1, stride=1, bias=False),\n            BatchNorm(mid_channels),\n            nn.ReLU(),\n        )\n        self.conv1 = nn.Conv2d(int(mid_channels * 5),\n                               mid_channels,\n                               1,\n                               bias=False)\n        self.bn1 = BatchNorm(mid_channels)\n        self.relu = nn.ReLU()\n        self.dropout = 
nn.Dropout(0.5)\n        self._init_weight()\n\n    def forward(self, x):\n        x1 = self.aspp1(x)\n        x2 = self.aspp2(x)\n        x3 = self.aspp3(x)\n        x4 = self.aspp4(x)\n        x5 = self.global_avg_pool(x)\n        x5 = F.interpolate(x5,\n                           size=x4.size()[2:],\n                           mode='bilinear',\n                           align_corners=True)\n        x = torch.cat((x1, x2, x3, x4, x5), dim=1)\n\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n\n        return self.dropout(x)\n\n    def _init_weight(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                torch.nn.init.kaiming_normal_(m.weight)\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n\nclass Mlp(nn.Module):\n\n    def __init__(self,\n                 in_features,\n                 hidden_features=None,\n                 out_features=None,\n                 act_layer=nn.ReLU,\n                 drop=0.0):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Linear(in_features, hidden_features)\n        self.act = act_layer()\n        self.drop1 = nn.Dropout(drop)\n        self.fc2 = nn.Linear(hidden_features, out_features)\n        self.drop2 = nn.Dropout(drop)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.drop1(x)\n        x = self.fc2(x)\n        x = self.drop2(x)\n        return x\n\n\nclass SELayer(nn.Module):\n\n    def __init__(self, channels, act_layer=nn.ReLU, gate_layer=nn.Sigmoid):\n        super().__init__()\n        self.conv_reduce = nn.Conv2d(channels, channels, 1, bias=True)\n        self.act1 = act_layer()\n        self.conv_expand = nn.Conv2d(channels, channels, 1, bias=True)\n        self.gate = gate_layer()\n\n    def forward(self, x, x_se):\n        x_se = self.conv_reduce(x_se)\n        x_se = self.act1(x_se)\n        x_se = self.conv_expand(x_se)\n        return x * self.gate(x_se)\n\n\nclass DepthNet(nn.Module):\n\n    def __init__(self, in_channels, mid_channels, context_channels,\n                 depth_channels):\n        super(DepthNet, self).__init__()\n        self.reduce_conv = nn.Sequential(\n            nn.Conv2d(in_channels,\n                      mid_channels,\n                      kernel_size=3,\n                      stride=1,\n                      padding=1),\n            nn.BatchNorm2d(mid_channels),\n            nn.ReLU(inplace=True),\n        )\n        self.context_conv = nn.Conv2d(mid_channels,\n                                      context_channels,\n                                      kernel_size=1,\n                                      stride=1,\n                                      padding=0)\n        self.bn = nn.BatchNorm1d(27)\n        self.depth_mlp = Mlp(27, mid_channels, mid_channels)\n        self.depth_se = SELayer(mid_channels)  # NOTE: add camera-aware\n        self.context_mlp = Mlp(27, mid_channels, mid_channels)\n        self.context_se = SELayer(mid_channels)  # NOTE: add camera-aware\n        self.depth_conv = nn.Sequential(\n            BasicBlock(mid_channels, mid_channels),\n            BasicBlock(mid_channels, mid_channels),\n            BasicBlock(mid_channels, mid_channels),\n            ASPP(mid_channels, mid_channels),\n            build_conv_layer(cfg=dict(\n                type='DCN',\n               
 in_channels=mid_channels,\n                out_channels=mid_channels,\n                kernel_size=3,\n                padding=1,\n                groups=4,\n                im2col_step=128,\n            )),\n            nn.Conv2d(mid_channels,\n                      depth_channels,\n                      kernel_size=1,\n                      stride=1,\n                      padding=0),\n        )\n\n    def forward(self, x, mats_dict):\n        intrins = mats_dict['intrin_mats'][:, 0:1, ..., :3, :3]\n        batch_size = intrins.shape[0]\n        num_cams = intrins.shape[2]\n        ida = mats_dict['ida_mats'][:, 0:1, ...]\n        sensor2ego = mats_dict['sensor2ego_mats'][:, 0:1, ..., :3, :]\n        bda = mats_dict['bda_mat'].view(batch_size, 1, 1, 4,\n                                        4).repeat(1, 1, num_cams, 1, 1)\n        mlp_input = torch.cat(\n            [\n                torch.stack(\n                    [\n                        intrins[:, 0:1, ..., 0, 0],\n                        intrins[:, 0:1, ..., 1, 1],\n                        intrins[:, 0:1, ..., 0, 2],\n                        intrins[:, 0:1, ..., 1, 2],\n                        ida[:, 0:1, ..., 0, 0],\n                        ida[:, 0:1, ..., 0, 1],\n                        ida[:, 0:1, ..., 0, 3],\n                        ida[:, 0:1, ..., 1, 0],\n                        ida[:, 0:1, ..., 1, 1],\n                        ida[:, 0:1, ..., 1, 3],\n                        bda[:, 0:1, ..., 0, 0],\n                        bda[:, 0:1, ..., 0, 1],\n                        bda[:, 0:1, ..., 1, 0],\n                        bda[:, 0:1, ..., 1, 1],\n                        bda[:, 0:1, ..., 2, 2],\n                    ],\n                    dim=-1,\n                ),\n                sensor2ego.view(batch_size, 1, num_cams, -1),\n            ],\n            -1,\n        )\n        mlp_input = self.bn(mlp_input.reshape(-1, mlp_input.shape[-1]))\n        x = self.reduce_conv(x)\n        context_se = self.context_mlp(mlp_input)[..., None, None]\n        context = self.context_se(x, context_se)\n        context = self.context_conv(context)\n        depth_se = self.depth_mlp(mlp_input)[..., None, None]\n        depth = self.depth_se(x, depth_se)\n        depth = self.depth_conv(depth)\n        return torch.cat([depth, context], dim=1)\n\n\nclass DepthAggregation(nn.Module):\n    \"\"\"\n    pixel cloud feature extraction\n    \"\"\"\n\n    def __init__(self, in_channels, mid_channels, out_channels):\n        super(DepthAggregation, self).__init__()\n\n        self.reduce_conv = nn.Sequential(\n            nn.Conv2d(in_channels,\n                      mid_channels,\n                      kernel_size=3,\n                      stride=1,\n                      padding=1,\n                      bias=False),\n            nn.BatchNorm2d(mid_channels),\n            nn.ReLU(inplace=True),\n        )\n\n        self.conv = nn.Sequential(\n            nn.Conv2d(mid_channels,\n                      mid_channels,\n                      kernel_size=3,\n                      stride=1,\n                      padding=1,\n                      bias=False),\n            nn.BatchNorm2d(mid_channels),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(mid_channels,\n                      mid_channels,\n                      kernel_size=3,\n                      stride=1,\n                      padding=1,\n                      bias=False),\n            nn.BatchNorm2d(mid_channels),\n            nn.ReLU(inplace=True),\n        )\n\n        
self.out_conv = nn.Sequential(\n            nn.Conv2d(mid_channels,\n                      out_channels,\n                      kernel_size=3,\n                      stride=1,\n                      padding=1,\n                      bias=True),\n            # nn.BatchNorm3d(out_channels),\n            # nn.ReLU(inplace=True),\n        )\n\n    @autocast(False)\n    def forward(self, x):\n        x = self.reduce_conv(x)\n        x = self.conv(x) + x\n        x = self.out_conv(x)\n        return x\n\n\nclass BaseLSSFPN(nn.Module):\n\n    def __init__(self,\n                 x_bound,\n                 y_bound,\n                 z_bound,\n                 d_bound,\n                 final_dim,\n                 downsample_factor,\n                 output_channels,\n                 img_backbone_conf,\n                 img_neck_conf,\n                 depth_net_conf,\n                 use_da=False):\n        \"\"\"Modified from `https://github.com/nv-tlabs/lift-splat-shoot`.\n\n        Args:\n            x_bound (list): Boundaries for x.\n            y_bound (list): Boundaries for y.\n            z_bound (list): Boundaries for z.\n            d_bound (list): Boundaries for d.\n            final_dim (list): Dimension for input images.\n            downsample_factor (int): Downsample factor between feature map\n                and input image.\n            output_channels (int): Number of channels for the output\n                feature map.\n            img_backbone_conf (dict): Config for image backbone.\n            img_neck_conf (dict): Config for image neck.\n            depth_net_conf (dict): Config for depth net.\n        \"\"\"\n\n        super(BaseLSSFPN, self).__init__()\n        self.downsample_factor = downsample_factor\n        self.d_bound = d_bound\n        self.final_dim = final_dim\n        self.output_channels = output_channels\n\n        self.register_buffer(\n            'voxel_size',\n            torch.Tensor([row[2] for row in [x_bound, y_bound, z_bound]]))\n        self.register_buffer(\n            'voxel_coord',\n            torch.Tensor([\n                row[0] + row[2] / 2.0 for row in [x_bound, y_bound, z_bound]\n            ]))\n        self.register_buffer(\n            'voxel_num',\n            torch.LongTensor([(row[1] - row[0]) / row[2]\n                              for row in [x_bound, y_bound, z_bound]]))\n        self.register_buffer('frustum', self.create_frustum())\n        self.depth_channels, _, _, _ = self.frustum.shape\n\n        self.img_backbone = build_backbone(img_backbone_conf)\n        self.img_neck = build_neck(img_neck_conf)\n        self.depth_net = self._configure_depth_net(depth_net_conf)\n\n        self.img_neck.init_weights()\n        self.img_backbone.init_weights()\n        self.use_da = use_da\n        if self.use_da:\n            self.depth_aggregation_net = self._configure_depth_aggregation_net(\n            )\n\n    def _configure_depth_net(self, depth_net_conf):\n        return DepthNet(\n            depth_net_conf['in_channels'],\n            depth_net_conf['mid_channels'],\n            self.output_channels,\n            self.depth_channels,\n        )\n\n    def _configure_depth_aggregation_net(self):\n        \"\"\"build pixel cloud feature extractor\"\"\"\n        return DepthAggregation(self.output_channels, self.output_channels,\n                                self.output_channels)\n\n    def _forward_voxel_net(self, img_feat_with_depth):\n        if self.use_da:\n            # BEVConv2D [n, c, d, h, w] -> [n, h, c, w, d]\n     
       img_feat_with_depth = img_feat_with_depth.permute(\n                0, 3, 1, 4,\n                2).contiguous()  # [n, c, d, h, w] -> [n, h, c, w, d]\n            n, h, c, w, d = img_feat_with_depth.shape\n            img_feat_with_depth = img_feat_with_depth.view(-1, c, w, d)\n            img_feat_with_depth = (\n                self.depth_aggregation_net(img_feat_with_depth).view(\n                    n, h, c, w, d).permute(0, 2, 4, 1, 3).contiguous())\n        return img_feat_with_depth\n\n    def create_frustum(self):\n        \"\"\"Generate frustum\"\"\"\n        # make grid in image plane\n        ogfH, ogfW = self.final_dim\n        fH, fW = ogfH // self.downsample_factor, ogfW // self.downsample_factor\n        d_coords = torch.arange(*self.d_bound,\n                                dtype=torch.float).view(-1, 1,\n                                                        1).expand(-1, fH, fW)\n        D, _, _ = d_coords.shape\n        x_coords = torch.linspace(0, ogfW - 1, fW, dtype=torch.float).view(\n            1, 1, fW).expand(D, fH, fW)\n        y_coords = torch.linspace(0, ogfH - 1, fH,\n                                  dtype=torch.float).view(1, fH,\n                                                          1).expand(D, fH, fW)\n        paddings = torch.ones_like(d_coords)\n\n        # D x H x W x 4\n        frustum = torch.stack((x_coords, y_coords, d_coords, paddings), -1)\n        return frustum\n\n    def get_geometry(self, sensor2ego_mat, intrin_mat, ida_mat, bda_mat):\n        \"\"\"Transfer points from camera coord to ego coord.\n\n        Args:\n            sensor2ego_mat(Tensor): Transformation matrix from camera to ego.\n            intrin_mat(Tensor): Intrinsic matrix.\n            ida_mat(Tensor): Transformation matrix for ida.\n            bda_mat(Tensor): Rotation matrix for bda.\n\n        Returns:\n            Tensor: Points in ego coord.\n        \"\"\"\n        batch_size, num_cams, _, _ = sensor2ego_mat.shape\n\n        # undo post-transformation\n        # B x N x D x H x W x 3\n        points = self.frustum\n        ida_mat = ida_mat.view(batch_size, num_cams, 1, 1, 1, 4, 4)\n        points = ida_mat.inverse().matmul(points.unsqueeze(-1))\n        # cam_to_ego\n        points = torch.cat(\n            (points[:, :, :, :, :, :2] * points[:, :, :, :, :, 2:3],\n             points[:, :, :, :, :, 2:]), 5)\n\n        combine = sensor2ego_mat.matmul(torch.inverse(intrin_mat))\n        points = combine.view(batch_size, num_cams, 1, 1, 1, 4,\n                              4).matmul(points)\n        if bda_mat is not None:\n            bda_mat = bda_mat.unsqueeze(1).repeat(1, num_cams, 1, 1).view(\n                batch_size, num_cams, 1, 1, 1, 4, 4)\n            points = (bda_mat @ points).squeeze(-1)\n        else:\n            points = points.squeeze(-1)\n        return points[..., :3]\n\n    def get_cam_feats(self, imgs):\n        \"\"\"Get feature maps from images.\"\"\"\n        batch_size, num_sweeps, num_cams, num_channels, imH, imW = imgs.shape\n\n        imgs = imgs.flatten().view(batch_size * num_sweeps * num_cams,\n                                   num_channels, imH, imW)\n        img_feats = self.img_neck(self.img_backbone(imgs))[0]\n        img_feats = img_feats.reshape(batch_size, num_sweeps, num_cams,\n                                      img_feats.shape[1], img_feats.shape[2],\n                                      
img_feats.shape[3])\n        return img_feats\n\n    def _forward_depth_net(self, feat, mats_dict):\n        return self.depth_net(feat, mats_dict)\n\n    def _forward_single_sweep(self,\n                              sweep_index,\n                              sweep_imgs,\n                              mats_dict,\n                              is_return_depth=False):\n        \"\"\"Forward function for single sweep.\n\n        Args:\n            sweep_index (int): Index of sweeps.\n            sweep_imgs (Tensor): Input images.\n            mats_dict (dict):\n                sensor2ego_mats(Tensor): Transformation matrix from\n                    camera to ego with shape of (B, num_sweeps,\n                    num_cameras, 4, 4).\n                intrin_mats(Tensor): Intrinsic matrix with shape\n                    of (B, num_sweeps, num_cameras, 4, 4).\n                ida_mats(Tensor): Transformation matrix for ida with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                sensor2sensor_mats(Tensor): Transformation matrix\n                    from key frame camera to sweep frame camera with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                bda_mat(Tensor): Rotation matrix for bda with shape\n                    of (B, 4, 4).\n            is_return_depth (bool, optional): Whether to return depth.\n                Default: False.\n\n        Returns:\n            Tensor: BEV feature map.\n        \"\"\"\n        batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n            img_width = sweep_imgs.shape\n        img_feats = self.get_cam_feats(sweep_imgs)\n        source_features = img_feats[:, 0, ...]\n        depth_feature = self._forward_depth_net(\n            source_features.reshape(batch_size * num_cams,\n                                    source_features.shape[2],\n                                    source_features.shape[3],\n                                    source_features.shape[4]),\n            mats_dict,\n        )\n        depth = depth_feature[:, :self.depth_channels].softmax(\n            dim=1, dtype=depth_feature.dtype)\n        geom_xyz = self.get_geometry(\n            mats_dict['sensor2ego_mats'][:, sweep_index, ...],\n            mats_dict['intrin_mats'][:, sweep_index, ...],\n            mats_dict['ida_mats'][:, sweep_index, ...],\n            mats_dict.get('bda_mat', None),\n        )\n        geom_xyz = ((geom_xyz - (self.voxel_coord - self.voxel_size / 2.0)) /\n                    self.voxel_size).int()\n        if self.training or self.use_da:\n            img_feat_with_depth = depth.unsqueeze(\n                1) * depth_feature[:, self.depth_channels:(\n                    self.depth_channels + self.output_channels)].unsqueeze(2)\n\n            img_feat_with_depth = self._forward_voxel_net(img_feat_with_depth)\n\n            img_feat_with_depth = img_feat_with_depth.reshape(\n                batch_size,\n                num_cams,\n                img_feat_with_depth.shape[1],\n                img_feat_with_depth.shape[2],\n                img_feat_with_depth.shape[3],\n                img_feat_with_depth.shape[4],\n            )\n\n            img_feat_with_depth = img_feat_with_depth.permute(0, 1, 3, 4, 5, 2)\n\n            feature_map = voxel_pooling_train(geom_xyz,\n                                              img_feat_with_depth.contiguous(),\n                                              self.voxel_num.cuda())\n        else:\n            feature_map = voxel_pooling_inference(\n    
            geom_xyz, depth, depth_feature[:, self.depth_channels:(\n                    self.depth_channels + self.output_channels)].contiguous(),\n                self.voxel_num.cuda())\n        if is_return_depth:\n            # final_depth has to be fp32, otherwise the depth\n            # loss will collapse during the training process.\n            return feature_map.contiguous(\n            ), depth_feature[:, :self.depth_channels].softmax(dim=1)\n        return feature_map.contiguous()\n\n    def forward(self,\n                sweep_imgs,\n                mats_dict,\n                timestamps=None,\n                is_return_depth=False):\n        \"\"\"Forward function.\n\n        Args:\n            sweep_imgs(Tensor): Input images with shape of (B, num_sweeps,\n                num_cameras, 3, H, W).\n            mats_dict(dict):\n                sensor2ego_mats(Tensor): Transformation matrix from\n                    camera to ego with shape of (B, num_sweeps,\n                    num_cameras, 4, 4).\n                intrin_mats(Tensor): Intrinsic matrix with shape\n                    of (B, num_sweeps, num_cameras, 4, 4).\n                ida_mats(Tensor): Transformation matrix for ida with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                sensor2sensor_mats(Tensor): Transformation matrix\n                    from key frame camera to sweep frame camera with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                bda_mat(Tensor): Rotation matrix for bda with shape\n                    of (B, 4, 4).\n            timestamps(Tensor): Timestamp for all images with the shape of (B,\n                num_sweeps, num_cameras).\n\n        Returns:\n            Tensor: BEV feature map.\n        \"\"\"\n        batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n            img_width = sweep_imgs.shape\n\n        key_frame_res = self._forward_single_sweep(\n            0,\n            sweep_imgs[:, 0:1, ...],\n            mats_dict,\n            is_return_depth=is_return_depth)\n        if num_sweeps == 1:\n            return key_frame_res\n\n        key_frame_feature = key_frame_res[\n            0] if is_return_depth else key_frame_res\n\n        ret_feature_list = [key_frame_feature]\n        for sweep_index in range(1, num_sweeps):\n            with torch.no_grad():\n                feature_map = self._forward_single_sweep(\n                    sweep_index,\n                    sweep_imgs[:, sweep_index:sweep_index + 1, ...],\n                    mats_dict,\n                    is_return_depth=False)\n                ret_feature_list.append(feature_map)\n\n        if is_return_depth:\n            return torch.cat(ret_feature_list, 1), key_frame_res[1]\n        else:\n            return torch.cat(ret_feature_list, 1)\n"
  },
  {
    "path": "bevdepth/layers/backbones/bevstereo_lss_fpn.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport math\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.cnn import build_conv_layer\nfrom mmdet.models.backbones.resnet import BasicBlock\nfrom scipy.special import erf\nfrom scipy.stats import norm\nfrom torch import nn\n\nfrom bevdepth.layers.backbones.base_lss_fpn import (ASPP, BaseLSSFPN, Mlp,\n                                                    SELayer)\n\ntry:\n    from bevdepth.ops.voxel_pooling_inference import voxel_pooling_inference\n    from bevdepth.ops.voxel_pooling_train import voxel_pooling_train\nexcept ImportError:\n    print('Import VoxelPooling fail.')\n\n__all__ = ['BEVStereoLSSFPN']\n\n\nclass ConvBnReLU3D(nn.Module):\n    \"\"\"Implements of 3d convolution + batch normalization + ReLU.\"\"\"\n\n    def __init__(\n        self,\n        in_channels: int,\n        out_channels: int,\n        kernel_size: int = 3,\n        stride: int = 1,\n        pad: int = 1,\n        dilation: int = 1,\n    ) -> None:\n        \"\"\"initialization method for convolution3D +\n            batch normalization + relu module\n        Args:\n            in_channels: input channel number of convolution layer\n            out_channels: output channel number of convolution layer\n            kernel_size: kernel size of convolution layer\n            stride: stride of convolution layer\n            pad: pad of convolution layer\n            dilation: dilation of convolution layer\n        \"\"\"\n        super(ConvBnReLU3D, self).__init__()\n        self.conv = nn.Conv3d(in_channels,\n                              out_channels,\n                              kernel_size,\n                              stride=stride,\n                              padding=pad,\n                              dilation=dilation,\n                              bias=False)\n        self.bn = nn.BatchNorm3d(out_channels)\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        \"\"\"forward method\"\"\"\n        return F.relu(self.bn(self.conv(x)), inplace=True)\n\n\nclass DepthNet(nn.Module):\n\n    def __init__(self,\n                 in_channels,\n                 mid_channels,\n                 context_channels,\n                 depth_channels,\n                 d_bound,\n                 num_ranges=4):\n        super(DepthNet, self).__init__()\n        self.reduce_conv = nn.Sequential(\n            nn.Conv2d(in_channels,\n                      mid_channels,\n                      kernel_size=3,\n                      stride=1,\n                      padding=1),\n            nn.BatchNorm2d(mid_channels),\n            nn.ReLU(inplace=True),\n        )\n        self.context_conv = nn.Conv2d(mid_channels,\n                                      context_channels,\n                                      kernel_size=1,\n                                      stride=1,\n                                      padding=0)\n        self.bn = nn.BatchNorm1d(27)\n        self.depth_mlp = Mlp(27, mid_channels, mid_channels)\n        self.depth_se = SELayer(mid_channels)  # NOTE: add camera-aware\n        self.context_mlp = Mlp(27, mid_channels, mid_channels)\n        self.context_se = SELayer(mid_channels)  # NOTE: add camera-aware\n        self.depth_feat_conv = nn.Sequential(\n            BasicBlock(mid_channels, mid_channels),\n            BasicBlock(mid_channels, mid_channels),\n            ASPP(mid_channels, mid_channels),\n            build_conv_layer(cfg=dict(\n                type='DCN',\n                
in_channels=mid_channels,\n                out_channels=mid_channels,\n                kernel_size=3,\n                padding=1,\n                groups=4,\n                im2col_step=128,\n            )),\n        )\n        self.mu_sigma_range_net = nn.Sequential(\n            BasicBlock(mid_channels, mid_channels),\n            nn.ConvTranspose2d(mid_channels,\n                               mid_channels,\n                               3,\n                               stride=2,\n                               padding=1,\n                               output_padding=1),\n            nn.BatchNorm2d(mid_channels),\n            nn.ReLU(inplace=True),\n            nn.ConvTranspose2d(mid_channels,\n                               mid_channels,\n                               3,\n                               stride=2,\n                               padding=1,\n                               output_padding=1),\n            nn.BatchNorm2d(mid_channels),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(mid_channels,\n                      num_ranges * 3,\n                      kernel_size=1,\n                      stride=1,\n                      padding=0),\n        )\n        self.mono_depth_net = nn.Sequential(\n            BasicBlock(mid_channels, mid_channels),\n            nn.Conv2d(mid_channels,\n                      depth_channels,\n                      kernel_size=1,\n                      stride=1,\n                      padding=0),\n        )\n        self.d_bound = d_bound\n        self.num_ranges = num_ranges\n\n    # @autocast(False)\n    def forward(self, x, mats_dict, scale_depth_factor=1000.0):\n        B, _, H, W = x.shape\n        intrins = mats_dict['intrin_mats'][:, 0:1, ..., :3, :3]\n        batch_size = intrins.shape[0]\n        num_cams = intrins.shape[2]\n        ida = mats_dict['ida_mats'][:, 0:1, ...]\n        sensor2ego = mats_dict['sensor2ego_mats'][:, 0:1, ..., :3, :]\n        bda = mats_dict['bda_mat'].view(batch_size, 1, 1, 4,\n                                        4).repeat(1, 1, num_cams, 1, 1)\n        mlp_input = torch.cat(\n            [\n                torch.stack(\n                    [\n                        intrins[:, 0:1, ..., 0, 0],\n                        intrins[:, 0:1, ..., 1, 1],\n                        intrins[:, 0:1, ..., 0, 2],\n                        intrins[:, 0:1, ..., 1, 2],\n                        ida[:, 0:1, ..., 0, 0],\n                        ida[:, 0:1, ..., 0, 1],\n                        ida[:, 0:1, ..., 0, 3],\n                        ida[:, 0:1, ..., 1, 0],\n                        ida[:, 0:1, ..., 1, 1],\n                        ida[:, 0:1, ..., 1, 3],\n                        bda[:, 0:1, ..., 0, 0],\n                        bda[:, 0:1, ..., 0, 1],\n                        bda[:, 0:1, ..., 1, 0],\n                        bda[:, 0:1, ..., 1, 1],\n                        bda[:, 0:1, ..., 2, 2],\n                    ],\n                    dim=-1,\n                ),\n                sensor2ego.view(batch_size, 1, num_cams, -1),\n            ],\n            -1,\n        )\n        mlp_input = self.bn(mlp_input.reshape(-1, mlp_input.shape[-1]))\n        x = self.reduce_conv(x)\n        context_se = self.context_mlp(mlp_input)[..., None, None]\n        context = self.context_se(x, context_se)\n        context = self.context_conv(context)\n        depth_se = self.depth_mlp(mlp_input)[..., None, None]\n        depth_feat = self.depth_se(x, depth_se)\n        depth_feat = self.depth_feat_conv(depth_feat)\n        
mono_depth = self.mono_depth_net(depth_feat)\n        mu_sigma_score = self.mu_sigma_range_net(depth_feat)\n        d_coords = torch.arange(*self.d_bound,\n                                dtype=torch.float).reshape(1, -1, 1, 1).cuda()\n        d_coords = d_coords.repeat(B, 1, H, W)\n        mu = mu_sigma_score[:, 0:self.num_ranges, ...]\n        sigma = mu_sigma_score[:, self.num_ranges:2 * self.num_ranges, ...]\n        range_score = mu_sigma_score[:,\n                                     2 * self.num_ranges:3 * self.num_ranges,\n                                     ...]\n        sigma = F.elu(sigma) + 1.0 + 1e-10\n        return x, context, mu, sigma, range_score, mono_depth\n\n\nclass BEVStereoLSSFPN(BaseLSSFPN):\n\n    def __init__(self,\n                 x_bound,\n                 y_bound,\n                 z_bound,\n                 d_bound,\n                 final_dim,\n                 downsample_factor,\n                 output_channels,\n                 img_backbone_conf,\n                 img_neck_conf,\n                 depth_net_conf,\n                 use_da=False,\n                 sampling_range=3,\n                 num_samples=3,\n                 stereo_downsample_factor=4,\n                 em_iteration=3,\n                 min_sigma=1,\n                 num_groups=8,\n                 num_ranges=4,\n                 range_list=[[2, 8], [8, 16], [16, 28], [28, 58]],\n                 k_list=None,\n                 use_mask=True):\n        \"\"\"Modified from `https://github.com/nv-tlabs/lift-splat-shoot`.\n        Args:\n            x_bound (list): Boundaries for x.\n            y_bound (list): Boundaries for y.\n            z_bound (list): Boundaries for z.\n            d_bound (list): Boundaries for d.\n            final_dim (list): Dimension for input images.\n            downsample_factor (int): Downsample factor between feature map\n                and input image.\n            output_channels (int): Number of channels for the output\n                feature map.\n            img_backbone_conf (dict): Config for image backbone.\n            img_neck_conf (dict): Config for image neck.\n            depth_net_conf (dict): Config for depth net.\n            sampling_range (int): The base range of sampling candidates.\n                Defaults to 3.\n            num_samples (int): Number of samples. Defaults to 3.\n            stereo_downsample_factor (int): Downsample factor between input\n                image and stereo depth. Defaults to 4.\n            em_iteration (int): Number of iterations for EM. Defaults to 3.\n            min_sigma (float): Minimal value for sigma. Defaults to 1.\n            num_groups (int): Number of groups to keep after inner product.\n                Defaults to 8.\n            num_ranges (int): Number of split ranges. Defaults to 4.\n            range_list (list): Start and end of every range. Defaults to\n                [[2, 8], [8, 16], [16, 28], [28, 58]].\n            k_list (list): Depth of all candidates inside the range.\n                Defaults to None.\n            use_mask (bool): Whether to use mask_net. 
Defaults to True.\n        \"\"\"\n        self.num_ranges = num_ranges\n        self.sampling_range = sampling_range\n        self.num_samples = num_samples\n        super(BEVStereoLSSFPN,\n              self).__init__(x_bound, y_bound, z_bound, d_bound, final_dim,\n                             downsample_factor, output_channels,\n                             img_backbone_conf, img_neck_conf, depth_net_conf,\n                             use_da)\n\n        self.depth_channels, _, _, _ = self.frustum.shape\n        self.use_mask = use_mask\n        if k_list is None:\n            self.register_buffer('k_list', torch.Tensor(self.depth_sampling()))\n        else:\n            self.register_buffer('k_list', torch.Tensor(k_list))\n        self.stereo_downsample_factor = stereo_downsample_factor\n        self.em_iteration = em_iteration\n        self.register_buffer(\n            'depth_values',\n            torch.arange((self.d_bound[1] - self.d_bound[0]) / self.d_bound[2],\n                         dtype=torch.float))\n        self.num_groups = num_groups\n        self.similarity_net = nn.Sequential(\n            ConvBnReLU3D(in_channels=num_groups,\n                         out_channels=16,\n                         kernel_size=1,\n                         stride=1,\n                         pad=0),\n            ConvBnReLU3D(in_channels=16,\n                         out_channels=8,\n                         kernel_size=1,\n                         stride=1,\n                         pad=0),\n            nn.Conv3d(in_channels=8,\n                      out_channels=1,\n                      kernel_size=1,\n                      stride=1,\n                      padding=0),\n        )\n        if range_list is None:\n            range_length = (d_bound[1] - d_bound[0]) / num_ranges\n            self.range_list = [[\n                d_bound[0] + range_length * i,\n                d_bound[0] + range_length * (i + 1)\n            ] for i in range(num_ranges)]\n        else:\n            assert len(range_list) == num_ranges\n            self.range_list = range_list\n\n        self.min_sigma = min_sigma\n        self.depth_downsample_net = nn.Sequential(\n            nn.Conv2d(self.depth_channels, 256, 3, 2, 1),\n            nn.BatchNorm2d(256),\n            nn.ReLU(),\n            nn.Conv2d(256, 256, 3, 2, 1),\n            nn.BatchNorm2d(256),\n            nn.ReLU(),\n            nn.Conv2d(256, self.depth_channels, 1, 1, 0),\n        )\n        self.context_downsample_net = nn.Identity()\n        if self.use_mask:\n            self.mask_net = nn.Sequential(\n                nn.Conv2d(224, 64, 3, 1, 1),\n                nn.BatchNorm2d(64),\n                nn.ReLU(inplace=True),\n                BasicBlock(64, 64),\n                BasicBlock(64, 64),\n                nn.Conv2d(64, 1, 1, 1, 0),\n                nn.Sigmoid(),\n            )\n\n    def depth_sampling(self):\n        \"\"\"Generate sampling range of candidates.\n        Returns:\n            list[float]: List of all candidates.\n        \"\"\"\n        P_total = erf(self.sampling_range /\n                      np.sqrt(2))  # Probability covered by the sampling range\n        idx_list = np.arange(0, self.num_samples + 1)\n        p_list = (1 - P_total) / 2 + ((idx_list / self.num_samples) * P_total)\n        k_list = norm.ppf(p_list)\n        k_list = (k_list[1:] + k_list[:-1]) / 2\n        return list(k_list)\n\n    def _generate_cost_volume(\n        self,\n        sweep_index,\n        stereo_feats_all_sweeps,\n        mats_dict,\n    
    depth_sample,\n        depth_sample_frustum,\n        sensor2sensor_mats,\n    ):\n        \"\"\"Generate cost volume based on depth sample.\n        Args:\n            sweep_index (int): Index of sweep.\n            stereo_feats_all_sweeps (list[Tensor]): Stereo feature\n                of all sweeps.\n            mats_dict (dict):\n                sensor2ego_mats (Tensor): Transformation matrix from\n                    camera to ego with shape of (B, num_sweeps,\n                    num_cameras, 4, 4).\n                intrin_mats (Tensor): Intrinsic matrix with shape\n                    of (B, num_sweeps, num_cameras, 4, 4).\n                ida_mats (Tensor): Transformation matrix for ida with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                sensor2sensor_mats (Tensor): Transformation matrix\n                    from key frame camera to sweep frame camera with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                bda_mat (Tensor): Rotation matrix for bda with shape\n                    of (B, 4, 4).\n            depth_sample (Tensor): Depth map of all candidates.\n            depth_sample_frustum (Tensor): Pre-generated frustum.\n            sensor2sensor_mats (Tensor): Transformation matrix from reference\n                sensor to source sensor.\n        Returns:\n            Tensor: Depth score for all sweeps.\n        \"\"\"\n        batch_size, num_channels, height, width = stereo_feats_all_sweeps[\n            0].shape\n        num_sweeps = len(stereo_feats_all_sweeps)\n        depth_score_all_sweeps = list()\n        for idx in range(num_sweeps):\n            if idx == sweep_index:\n                continue\n            warped_stereo_fea = self.homo_warping(\n                stereo_feats_all_sweeps[idx],\n                mats_dict['intrin_mats'][:, sweep_index, ...],\n                mats_dict['intrin_mats'][:, idx, ...],\n                sensor2sensor_mats[idx],\n                mats_dict['ida_mats'][:, sweep_index, ...],\n                mats_dict['ida_mats'][:, idx, ...],\n                depth_sample,\n                depth_sample_frustum.type_as(stereo_feats_all_sweeps[idx]),\n            )\n            warped_stereo_fea = warped_stereo_fea.reshape(\n                batch_size, self.num_groups, num_channels // self.num_groups,\n                self.num_samples, height, width)\n            ref_stereo_feat = stereo_feats_all_sweeps[sweep_index].reshape(\n                batch_size, self.num_groups, num_channels // self.num_groups,\n                height, width)\n            feat_cost = torch.mean(\n                (ref_stereo_feat.unsqueeze(3) * warped_stereo_fea), axis=2)\n            depth_score = self.similarity_net(feat_cost).squeeze(1)\n            depth_score_all_sweeps.append(depth_score)\n        return torch.stack(depth_score_all_sweeps).mean(0)\n\n    def homo_warping(\n        self,\n        stereo_feat,\n        key_intrin_mats,\n        sweep_intrin_mats,\n        sensor2sensor_mats,\n        key_ida_mats,\n        sweep_ida_mats,\n        depth_sample,\n        frustum,\n    ):\n        \"\"\"Used for mvs method to transfer sweep image feature to\n            key image feature.\n        Args:\n            src_fea(Tensor): image features.\n            key_intrin_mats(Tensor): Intrin matrix for key sensor.\n            sweep_intrin_mats(Tensor): Intrin matrix for sweep sensor.\n            sensor2sensor_mats(Tensor): Transformation matrix from key\n                sensor to sweep sensor.\n       
     key_ida_mats(Tensor): Ida matrix for key frame.\n            sweep_ida_mats(Tensor): Ida matrix for sweep frame.\n            depth_sample (Tensor): Depth map of all candidates.\n            depth_sample_frustum (Tensor): Pre-generated frustum.\n        \"\"\"\n        batch_size_with_num_cams, channels = stereo_feat.shape[\n            0], stereo_feat.shape[1]\n        height, width = stereo_feat.shape[2], stereo_feat.shape[3]\n        with torch.no_grad():\n            points = frustum\n            points = points.reshape(points.shape[0], -1, points.shape[-1])\n            points[..., 2] = 1\n            # Undo ida for key frame.\n            points = key_ida_mats.reshape(batch_size_with_num_cams, *\n                                          key_ida_mats.shape[2:]).inverse(\n                                          ).unsqueeze(1) @ points.unsqueeze(-1)\n            # Convert points from pixel coord to key camera coord.\n            points[..., :3, :] *= depth_sample.reshape(\n                batch_size_with_num_cams, -1, 1, 1)\n            num_depth = frustum.shape[1]\n            points = (key_intrin_mats.reshape(\n                batch_size_with_num_cams, *\n                key_intrin_mats.shape[2:]).inverse().unsqueeze(1) @ points)\n            points = (sensor2sensor_mats.reshape(\n                batch_size_with_num_cams, *\n                sensor2sensor_mats.shape[2:]).unsqueeze(1) @ points)\n            # points in sweep sensor coord.\n            points = (sweep_intrin_mats.reshape(\n                batch_size_with_num_cams, *\n                sweep_intrin_mats.shape[2:]).unsqueeze(1) @ points)\n            # points in sweep pixel coord.\n            points[..., :2, :] = points[..., :2, :] / points[\n                ..., 2:3, :]  # [B, 2, Ndepth, H*W]\n\n            points = (sweep_ida_mats.reshape(\n                batch_size_with_num_cams, *\n                sweep_ida_mats.shape[2:]).unsqueeze(1) @ points).squeeze(-1)\n            neg_mask = points[..., 2] < 1e-3\n            points[..., 0][neg_mask] = width * self.stereo_downsample_factor\n            points[..., 1][neg_mask] = height * self.stereo_downsample_factor\n            points[..., 2][neg_mask] = 1\n            proj_x_normalized = points[..., 0] / (\n                (width * self.stereo_downsample_factor - 1) / 2) - 1\n            proj_y_normalized = points[..., 1] / (\n                (height * self.stereo_downsample_factor - 1) / 2) - 1\n            grid = torch.stack([proj_x_normalized, proj_y_normalized],\n                               dim=2)  # [B, Ndepth, H*W, 2]\n\n        warped_stereo_fea = F.grid_sample(\n            stereo_feat,\n            grid.view(batch_size_with_num_cams, num_depth * height, width, 2),\n            mode='bilinear',\n            padding_mode='zeros',\n        )\n        warped_stereo_fea = warped_stereo_fea.view(batch_size_with_num_cams,\n                                                   channels, num_depth, height,\n                                                   width)\n\n        return warped_stereo_fea\n\n    def _forward_stereo(\n        self,\n        sweep_index,\n        stereo_feats_all_sweeps,\n        mono_depth_all_sweeps,\n        mats_dict,\n        sensor2sensor_mats,\n        mu_all_sweeps,\n        sigma_all_sweeps,\n        range_score_all_sweeps,\n        depth_feat_all_sweeps,\n    ):\n        \"\"\"Forward function to generate stereo depth.\n        Args:\n            sweep_index (int): Index of sweep.\n            stereo_feats_all_sweeps (list[Tensor]): 
Stereo feature\n                of all sweeps.\n            mono_depth_all_sweeps (list[Tensor]):\n            mats_dict (dict):\n                sensor2ego_mats (Tensor): Transformation matrix from\n                    camera to ego with shape of (B, num_sweeps,\n                    num_cameras, 4, 4).\n                intrin_mats (Tensor): Intrinsic matrix with shape\n                    of (B, num_sweeps, num_cameras, 4, 4).\n                ida_mats (Tensor): Transformation matrix for ida with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                sensor2sensor_mats (Tensor): Transformation matrix\n                    from key frame camera to sweep frame camera with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                bda_mat (Tensor): Rotation matrix for bda with shape\n                    of (B, 4, 4).\n            sensor2sensor_mats(Tensor): Transformation matrix from key\n                sensor to sweep sensor.\n            mu_all_sweeps (list[Tensor]): List of mu for all sweeps.\n            sigma_all_sweeps (list[Tensor]): List of sigma for all sweeps.\n            range_score_all_sweeps (list[Tensor]): List of all range score\n                for all sweeps.\n            depth_feat_all_sweeps (list[Tensor]): List of all depth feat for\n                all sweeps.\n        Returns:\n            Tensor: stereo_depth\n        \"\"\"\n        batch_size_with_cams, _, feat_height, feat_width = \\\n            stereo_feats_all_sweeps[0].shape\n        device = stereo_feats_all_sweeps[0].device\n        d_coords = torch.arange(*self.d_bound,\n                                dtype=torch.float,\n                                device=device).reshape(1, -1, 1, 1)\n        d_coords = d_coords.repeat(batch_size_with_cams, 1, feat_height,\n                                   feat_width)\n        stereo_depth = stereo_feats_all_sweeps[0].new_zeros(\n            batch_size_with_cams, self.depth_channels, feat_height, feat_width)\n        mask_score = stereo_feats_all_sweeps[0].new_zeros(\n            batch_size_with_cams,\n            self.depth_channels,\n            feat_height * self.stereo_downsample_factor //\n            self.downsample_factor,\n            feat_width * self.stereo_downsample_factor //\n            self.downsample_factor,\n        )\n        score_all_ranges = list()\n        range_score = range_score_all_sweeps[sweep_index].softmax(1)\n        for range_idx in range(self.num_ranges):\n            # Map mu to the corresponding interval.\n            range_start = self.range_list[range_idx][0]\n            mu_all_sweeps_single_range = [\n                mu[:, range_idx:range_idx + 1, ...].sigmoid() *\n                (self.range_list[range_idx][1] - self.range_list[range_idx][0])\n                + range_start for mu in mu_all_sweeps\n            ]\n            sigma_all_sweeps_single_range = [\n                sigma[:, range_idx:range_idx + 1, ...]\n                for sigma in sigma_all_sweeps\n            ]\n            batch_size_with_cams, _, feat_height, feat_width =\\\n                stereo_feats_all_sweeps[0].shape\n            mu = mu_all_sweeps_single_range[sweep_index]\n            sigma = sigma_all_sweeps_single_range[sweep_index]\n            for _ in range(self.em_iteration):\n                depth_sample = torch.cat([mu + sigma * k for k in self.k_list],\n                                         1)\n                depth_sample_frustum = self.create_depth_sample_frustum(\n                    
depth_sample, self.stereo_downsample_factor)\n                mu_score = self._generate_cost_volume(\n                    sweep_index,\n                    stereo_feats_all_sweeps,\n                    mats_dict,\n                    depth_sample,\n                    depth_sample_frustum,\n                    sensor2sensor_mats,\n                )\n                mu_score = mu_score.softmax(1)\n                scale_factor = torch.clamp(\n                    0.5 / (1e-4 + mu_score[:, self.num_samples //\n                                           2:self.num_samples // 2 + 1, ...]),\n                    min=0.1,\n                    max=10)\n\n                sigma = torch.clamp(sigma * scale_factor, min=0.1, max=10)\n                mu = (depth_sample * mu_score).sum(1, keepdim=True)\n                del depth_sample\n                del depth_sample_frustum\n            range_length = int(\n                (self.range_list[range_idx][1] - self.range_list[range_idx][0])\n                // self.d_bound[2])\n            if self.use_mask:\n                depth_sample = F.avg_pool2d(\n                    mu,\n                    self.downsample_factor // self.stereo_downsample_factor,\n                    self.downsample_factor // self.stereo_downsample_factor,\n                )\n                depth_sample_frustum = self.create_depth_sample_frustum(\n                    depth_sample, self.downsample_factor)\n                mask = self._forward_mask(\n                    sweep_index,\n                    mono_depth_all_sweeps,\n                    mats_dict,\n                    depth_sample,\n                    depth_sample_frustum,\n                    sensor2sensor_mats,\n                )\n                mask_score[:,\n                           int((range_start - self.d_bound[0]) //\n                               self.d_bound[2]):range_length +\n                           int((range_start - self.d_bound[0]) //\n                               self.d_bound[2]), ..., ] += mask\n                del depth_sample\n                del depth_sample_frustum\n            sigma = torch.clamp(sigma, self.min_sigma)\n            mu_repeated = mu.repeat(1, range_length, 1, 1)\n            eps = 1e-6\n            depth_score_single_range = (-1 / 2 * (\n                (d_coords[:,\n                          int((range_start - self.d_bound[0]) //\n                              self.d_bound[2]):range_length + int(\n                                  (range_start - self.d_bound[0]) //\n                                  self.d_bound[2]), ..., ] - mu_repeated) /\n                torch.sqrt(sigma))**2)\n            depth_score_single_range = depth_score_single_range.exp()\n            score_all_ranges.append(mu_score.sum(1).unsqueeze(1))\n            depth_score_single_range = depth_score_single_range / (\n                sigma * math.sqrt(2 * math.pi) + eps)\n            stereo_depth[:,\n                         int((range_start - self.d_bound[0]) //\n                             self.d_bound[2]):range_length +\n                         int((range_start - self.d_bound[0]) //\n                             self.d_bound[2]), ..., ] = (\n                                 depth_score_single_range *\n                                 range_score[:, range_idx:range_idx + 1, ...])\n            del depth_score_single_range\n            del mu_repeated\n        if self.use_mask:\n            return stereo_depth, mask_score\n        else:\n            return stereo_depth\n\n    def 
create_depth_sample_frustum(self, depth_sample, downsample_factor=16):\n        \"\"\"Generate frustum\"\"\"\n        # make grid in image plane\n        ogfH, ogfW = self.final_dim\n        fH, fW = ogfH // downsample_factor, ogfW // downsample_factor\n        batch_size, num_depth, _, _ = depth_sample.shape\n        x_coords = (torch.linspace(0,\n                                   ogfW - 1,\n                                   fW,\n                                   dtype=torch.float,\n                                   device=depth_sample.device).view(\n                                       1, 1, 1,\n                                       fW).expand(batch_size, num_depth, fH,\n                                                  fW))\n        y_coords = (torch.linspace(0,\n                                   ogfH - 1,\n                                   fH,\n                                   dtype=torch.float,\n                                   device=depth_sample.device).view(\n                                       1, 1, fH,\n                                       1).expand(batch_size, num_depth, fH,\n                                                 fW))\n        paddings = torch.ones_like(depth_sample)\n\n        # D x H x W x 3\n        frustum = torch.stack((x_coords, y_coords, depth_sample, paddings), -1)\n        return frustum\n\n    def _configure_depth_net(self, depth_net_conf):\n        return DepthNet(\n            depth_net_conf['in_channels'],\n            depth_net_conf['mid_channels'],\n            self.output_channels,\n            self.depth_channels,\n            self.d_bound,\n            self.num_ranges,\n        )\n\n    def get_cam_feats(self, imgs):\n        \"\"\"Get feature maps from images.\"\"\"\n        batch_size, num_sweeps, num_cams, num_channels, imH, imW = imgs.shape\n\n        imgs = imgs.flatten().view(batch_size * num_sweeps * num_cams,\n                                   num_channels, imH, imW)\n        backbone_feats = self.img_backbone(imgs)\n        img_feats = self.img_neck(backbone_feats)[0]\n        img_feats_reshape = img_feats.reshape(batch_size, num_sweeps, num_cams,\n                                              img_feats.shape[1],\n                                              img_feats.shape[2],\n                                              img_feats.shape[3])\n        return img_feats_reshape, backbone_feats[0].detach()\n\n    def _forward_mask(\n        self,\n        sweep_index,\n        mono_depth_all_sweeps,\n        mats_dict,\n        depth_sample,\n        depth_sample_frustum,\n        sensor2sensor_mats,\n    ):\n        \"\"\"Forward function to generate mask.\n        Args:\n            sweep_index (int): Index of sweep.\n            mono_depth_all_sweeps (list[Tensor]): List of mono_depth for\n                all sweeps.\n            mats_dict (dict):\n                sensor2ego_mats (Tensor): Transformation matrix from\n                    camera to ego with shape of (B, num_sweeps,\n                    num_cameras, 4, 4).\n                intrin_mats (Tensor): Intrinsic matrix with shape\n                    of (B, num_sweeps, num_cameras, 4, 4).\n                ida_mats (Tensor): Transformation matrix for ida with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                sensor2sensor_mats (Tensor): Transformation matrix\n                    from key frame camera to sweep frame camera with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                bda_mat (Tensor): 
Rotation matrix for bda with shape\n                    of (B, 4, 4).\n            depth_sample (Tensor): Depth map of all candidates.\n            depth_sample_frustum (Tensor): Pre-generated frustum.\n            sensor2sensor_mats (Tensor): Transformation matrix from reference\n                sensor to source sensor.\n        Returns:\n            Tensor: Generated mask.\n        \"\"\"\n        num_sweeps = len(mono_depth_all_sweeps)\n        mask_all_sweeps = list()\n        for idx in range(num_sweeps):\n            if idx == sweep_index:\n                continue\n            warped_mono_depth = self.homo_warping(\n                mono_depth_all_sweeps[idx],\n                mats_dict['intrin_mats'][:, sweep_index, ...],\n                mats_dict['intrin_mats'][:, idx, ...],\n                sensor2sensor_mats[idx],\n                mats_dict['ida_mats'][:, sweep_index, ...],\n                mats_dict['ida_mats'][:, idx, ...],\n                depth_sample,\n                depth_sample_frustum.type_as(mono_depth_all_sweeps[idx]),\n            )\n            mask = self.mask_net(\n                torch.cat([\n                    mono_depth_all_sweeps[sweep_index].detach(),\n                    warped_mono_depth.mean(2).detach()\n                ], 1))\n            mask_all_sweeps.append(mask)\n        return torch.stack(mask_all_sweeps).mean(0)\n\n    def _forward_single_sweep(self,\n                              sweep_index,\n                              context,\n                              mats_dict,\n                              depth_score,\n                              is_return_depth=False):\n        \"\"\"Forward function for single sweep.\n        Args:\n            sweep_index (int): Index of sweeps.\n            sweep_imgs (Tensor): Input images.\n            mats_dict (dict):\n                sensor2ego_mats(Tensor): Transformation matrix from\n                    camera to ego with shape of (B, num_sweeps,\n                    num_cameras, 4, 4).\n                intrin_mats(Tensor): Intrinsic matrix with shape\n                    of (B, num_sweeps, num_cameras, 4, 4).\n                ida_mats(Tensor): Transformation matrix for ida with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                sensor2sensor_mats(Tensor): Transformation matrix\n                    from key frame camera to sweep frame camera with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                bda_mat(Tensor): Rotation matrix for bda with shape\n                    of (B, 4, 4).\n            is_return_depth (bool, optional): Whether to return depth.\n                Default: False.\n        Returns:\n            Tensor: BEV feature map.\n        \"\"\"\n        batch_size, num_cams = context.shape[0], context.shape[1]\n        context = context.reshape(batch_size * num_cams, *context.shape[2:])\n        depth = depth_score\n        geom_xyz = self.get_geometry(\n            mats_dict['sensor2ego_mats'][:, sweep_index, ...],\n            mats_dict['intrin_mats'][:, sweep_index, ...],\n            mats_dict['ida_mats'][:, sweep_index, ...],\n            mats_dict.get('bda_mat', None),\n        )\n        geom_xyz = ((geom_xyz - (self.voxel_coord - self.voxel_size / 2.0)) /\n                    self.voxel_size).int()\n        if self.training or self.use_da:\n            img_feat_with_depth = depth.unsqueeze(1) * context.unsqueeze(2)\n\n            img_feat_with_depth = self._forward_voxel_net(img_feat_with_depth)\n\n            
img_feat_with_depth = img_feat_with_depth.reshape(\n                batch_size,\n                num_cams,\n                img_feat_with_depth.shape[1],\n                img_feat_with_depth.shape[2],\n                img_feat_with_depth.shape[3],\n                img_feat_with_depth.shape[4],\n            )\n            img_feat_with_depth = img_feat_with_depth.permute(0, 1, 3, 4, 5, 2)\n\n            feature_map = voxel_pooling_train(geom_xyz,\n                                              img_feat_with_depth.contiguous(),\n                                              self.voxel_num.cuda())\n        else:\n            feature_map = voxel_pooling_inference(geom_xyz, depth.contiguous(),\n                                                  context.contiguous(),\n                                                  self.voxel_num.cuda())\n        if is_return_depth:\n            return feature_map.contiguous(), depth\n        return feature_map.contiguous()\n\n    def forward(self,\n                sweep_imgs,\n                mats_dict,\n                timestamps=None,\n                is_return_depth=False):\n        \"\"\"Forward function.\n        Args:\n            sweep_imgs(Tensor): Input images with shape of (B, num_sweeps,\n                num_cameras, 3, H, W).\n            mats_dict(dict):\n                sensor2ego_mats(Tensor): Transformation matrix from\n                    camera to ego with shape of (B, num_sweeps,\n                    num_cameras, 4, 4).\n                intrin_mats(Tensor): Intrinsic matrix with shape\n                    of (B, num_sweeps, num_cameras, 4, 4).\n                ida_mats(Tensor): Transformation matrix for ida with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                sensor2sensor_mats(Tensor): Transformation matrix\n                    from key frame camera to sweep frame camera with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                bda_mat(Tensor): Rotation matrix for bda with shape\n                    of (B, 4, 4).\n            timestamps(Tensor): Timestamp for all images with the shape of(B,\n                num_sweeps, num_cameras).\n        Return:\n            Tensor: bev feature map.\n        \"\"\"\n        batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n            img_width = sweep_imgs.shape\n        context_all_sweeps = list()\n        depth_feat_all_sweeps = list()\n        img_feats_all_sweeps = list()\n        stereo_feats_all_sweeps = list()\n        mu_all_sweeps = list()\n        sigma_all_sweeps = list()\n        mono_depth_all_sweeps = list()\n        range_score_all_sweeps = list()\n        for sweep_index in range(0, num_sweeps):\n            if sweep_index > 0:\n                with torch.no_grad():\n                    img_feats, stereo_feats = self.get_cam_feats(\n                        sweep_imgs[:, sweep_index:sweep_index + 1, ...])\n                    img_feats_all_sweeps.append(\n                        img_feats.view(batch_size * num_cams,\n                                       *img_feats.shape[3:]))\n                    stereo_feats_all_sweeps.append(stereo_feats)\n                    depth_feat, context, mu, sigma, range_score, mono_depth =\\\n                        self.depth_net(img_feats.view(batch_size * num_cams,\n                                       *img_feats.shape[3:]), mats_dict)\n                    context_all_sweeps.append(\n                        self.context_downsample_net(\n                            
context.reshape(batch_size * num_cams,\n                                            *context.shape[1:])))\n                    depth_feat_all_sweeps.append(depth_feat)\n            else:\n                img_feats, stereo_feats = self.get_cam_feats(\n                    sweep_imgs[:, sweep_index:sweep_index + 1, ...])\n                img_feats_all_sweeps.append(\n                    img_feats.view(batch_size * num_cams,\n                                   *img_feats.shape[3:]))\n                stereo_feats_all_sweeps.append(stereo_feats)\n                depth_feat, context, mu, sigma, range_score, mono_depth =\\\n                    self.depth_net(img_feats.view(batch_size * num_cams,\n                                   *img_feats.shape[3:]), mats_dict)\n                depth_feat_all_sweeps.append(depth_feat)\n                context_all_sweeps.append(\n                    self.context_downsample_net(\n                        context.reshape(batch_size * num_cams,\n                                        *context.shape[1:])))\n            mu_all_sweeps.append(mu)\n            sigma_all_sweeps.append(sigma)\n            mono_depth_all_sweeps.append(mono_depth)\n            range_score_all_sweeps.append(range_score)\n        depth_score_all_sweeps = list()\n        final_depth = None\n        for ref_idx in range(num_sweeps):\n            sensor2sensor_mats = list()\n            for src_idx in range(num_sweeps):\n                ref2keysensor_mats = mats_dict[\n                    'sensor2sensor_mats'][:, ref_idx, ...].inverse()\n                key2srcsensor_mats = mats_dict['sensor2sensor_mats'][:,\n                                                                     src_idx,\n                                                                     ...]\n                ref2srcsensor_mats = key2srcsensor_mats @ ref2keysensor_mats\n                sensor2sensor_mats.append(ref2srcsensor_mats)\n            if ref_idx == 0:\n                # last iteration on stage 1 does not have propagation\n                # (photometric consistency filtering)\n                if self.use_mask:\n                    stereo_depth, mask = self._forward_stereo(\n                        ref_idx,\n                        stereo_feats_all_sweeps,\n                        mono_depth_all_sweeps,\n                        mats_dict,\n                        sensor2sensor_mats,\n                        mu_all_sweeps,\n                        sigma_all_sweeps,\n                        range_score_all_sweeps,\n                        depth_feat_all_sweeps,\n                    )\n                else:\n                    stereo_depth = self._forward_stereo(\n                        ref_idx,\n                        stereo_feats_all_sweeps,\n                        mono_depth_all_sweeps,\n                        mats_dict,\n                        sensor2sensor_mats,\n                        mu_all_sweeps,\n                        sigma_all_sweeps,\n                        range_score_all_sweeps,\n                        depth_feat_all_sweeps,\n                    )\n            else:\n                with torch.no_grad():\n                    # last iteration on stage 1 does not have\n                    # propagation (photometric consistency filtering)\n                    if self.use_mask:\n                        stereo_depth, mask = self._forward_stereo(\n                            ref_idx,\n                            stereo_feats_all_sweeps,\n                            mono_depth_all_sweeps,\n                  
          mats_dict,\n                            sensor2sensor_mats,\n                            mu_all_sweeps,\n                            sigma_all_sweeps,\n                            range_score_all_sweeps,\n                            depth_feat_all_sweeps,\n                        )\n                    else:\n                        stereo_depth = self._forward_stereo(\n                            ref_idx,\n                            stereo_feats_all_sweeps,\n                            mono_depth_all_sweeps,\n                            mats_dict,\n                            sensor2sensor_mats,\n                            mu_all_sweeps,\n                            sigma_all_sweeps,\n                            range_score_all_sweeps,\n                            depth_feat_all_sweeps,\n                        )\n            if self.use_mask:\n                depth_score = (\n                    mono_depth_all_sweeps[ref_idx] +\n                    self.depth_downsample_net(stereo_depth) * mask).softmax(\n                        1, dtype=stereo_depth.dtype)\n            else:\n                depth_score = (\n                    mono_depth_all_sweeps[ref_idx] +\n                    self.depth_downsample_net(stereo_depth)).softmax(\n                        1, dtype=stereo_depth.dtype)\n            depth_score_all_sweeps.append(depth_score)\n            if ref_idx == 0:\n                # final_depth has to be fp32, otherwise the\n                # depth loss will collapse during the training process.\n                final_depth = (\n                    mono_depth_all_sweeps[ref_idx] +\n                    self.depth_downsample_net(stereo_depth)).softmax(1)\n        key_frame_res = self._forward_single_sweep(\n            0,\n            context_all_sweeps[0].reshape(batch_size, num_cams,\n                                          *context_all_sweeps[0].shape[1:]),\n            mats_dict,\n            depth_score_all_sweeps[0],\n            is_return_depth=is_return_depth,\n        )\n        if num_sweeps == 1:\n            return key_frame_res\n\n        key_frame_feature = key_frame_res[\n            0] if is_return_depth else key_frame_res\n\n        ret_feature_list = [key_frame_feature]\n        for sweep_index in range(1, num_sweeps):\n            with torch.no_grad():\n                feature_map = self._forward_single_sweep(\n                    sweep_index,\n                    context_all_sweeps[sweep_index].reshape(\n                        batch_size, num_cams,\n                        *context_all_sweeps[sweep_index].shape[1:]),\n                    mats_dict,\n                    depth_score_all_sweeps[sweep_index],\n                    is_return_depth=False,\n                )\n                ret_feature_list.append(feature_map)\n\n        if is_return_depth:\n            return torch.cat(ret_feature_list, 1), final_depth\n        else:\n            return torch.cat(ret_feature_list, 1)\n"
  },
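The multi-sweep stereo matching in `BEVStereoLSSFPN.forward` above repeatedly rebuilds the pose of each source sweep relative to the current reference sweep: `sensor2sensor_mats[:, idx]` maps the key-frame camera to sweep camera `idx`, so the reference-to-source transform is `key2srcsensor_mats @ ref2keysensor_mats`, with `ref2keysensor_mats = sensor2sensor_mats[:, ref_idx].inverse()`. Below is a minimal standalone sketch of that composition with toy 4x4 poses; the `make_pose` helper and all numbers are invented for illustration and are not part of the repo.

```python
import torch


def make_pose(yaw, tx):
    """Toy 4x4 homogeneous pose: rotation about z by `yaw`, translation `tx` along x."""
    c, s = torch.cos(torch.tensor(yaw)), torch.sin(torch.tensor(yaw))
    mat = torch.eye(4)
    mat[0, 0], mat[0, 1] = c, -s
    mat[1, 0], mat[1, 1] = s, c
    mat[0, 3] = tx
    return mat


# key2sweep[idx] plays the role of mats_dict['sensor2sensor_mats'][:, idx, ...]:
# the transform from the key-frame camera to the camera of sweep idx.
key2sweep = [make_pose(0.00, 0.0),   # sweep 0 is the key frame itself
             make_pose(0.05, 1.0),   # sweep 1
             make_pose(0.10, 2.0)]   # sweep 2

ref_idx, src_idx = 1, 2
ref2key = key2sweep[ref_idx].inverse()   # undo key -> ref
ref2src = key2sweep[src_idx] @ ref2key   # then apply key -> src

# Sanity check: mapping a point ref -> key -> src in two steps agrees with ref2src.
point_ref = torch.tensor([1.0, 2.0, 3.0, 1.0])
assert torch.allclose(ref2src @ point_ref, key2sweep[src_idx] @ (ref2key @ point_ref))
```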
  {
    "path": "bevdepth/layers/backbones/fusion_lss_fpn.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmdet.models.backbones.resnet import BasicBlock\n\ntry:\n    from bevdepth.ops.voxel_pooling_inference import voxel_pooling_inference\n    from bevdepth.ops.voxel_pooling_train import voxel_pooling_train\nexcept ImportError:\n    print('Import VoxelPooling fail.')\n\nfrom .base_lss_fpn import ASPP, BaseLSSFPN, Mlp, SELayer\n\n__all__ = ['FusionLSSFPN']\n\n\nclass DepthNet(nn.Module):\n\n    def __init__(self, in_channels, mid_channels, context_channels,\n                 depth_channels):\n        super(DepthNet, self).__init__()\n        self.reduce_conv = nn.Sequential(\n            nn.Conv2d(in_channels,\n                      mid_channels,\n                      kernel_size=3,\n                      stride=1,\n                      padding=1),\n            nn.BatchNorm2d(mid_channels),\n            nn.ReLU(inplace=True),\n        )\n        self.context_conv = nn.Conv2d(mid_channels,\n                                      context_channels,\n                                      kernel_size=1,\n                                      stride=1,\n                                      padding=0)\n        self.mlp = Mlp(1, mid_channels, mid_channels)\n        self.se = SELayer(mid_channels)  # NOTE: add camera-aware\n        self.depth_gt_conv = nn.Sequential(\n            nn.Conv2d(1, mid_channels, kernel_size=1, stride=1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(mid_channels, mid_channels, kernel_size=1, stride=1),\n        )\n        self.depth_conv = nn.Sequential(\n            BasicBlock(mid_channels, mid_channels),\n            BasicBlock(mid_channels, mid_channels),\n            BasicBlock(mid_channels, mid_channels),\n        )\n        self.aspp = ASPP(mid_channels, mid_channels)\n        self.depth_pred = nn.Conv2d(mid_channels,\n                                    depth_channels,\n                                    kernel_size=1,\n                                    stride=1,\n                                    padding=0)\n\n    def forward(self, x, mats_dict, lidar_depth, scale_depth_factor=1000.0):\n        x = self.reduce_conv(x)\n        context = self.context_conv(x)\n        inv_intrinsics = torch.inverse(mats_dict['intrin_mats'][:, 0:1, ...])\n        pixel_size = torch.norm(torch.stack(\n            [inv_intrinsics[..., 0, 0], inv_intrinsics[..., 1, 1]], dim=-1),\n                                dim=-1).reshape(-1, 1)\n        aug_scale = torch.sqrt(mats_dict['ida_mats'][:, 0, :, 0, 0]**2 +\n                               mats_dict['ida_mats'][:, 0, :, 0,\n                                                     0]**2).reshape(-1, 1)\n        scaled_pixel_size = pixel_size * scale_depth_factor / aug_scale\n        x_se = self.mlp(scaled_pixel_size)[..., None, None]\n        x = self.se(x, x_se)\n        depth = self.depth_gt_conv(lidar_depth)\n        depth = self.depth_conv(x + depth)\n        depth = self.aspp(depth)\n        depth = self.depth_pred(depth)\n        return torch.cat([depth, context], dim=1)\n\n\nclass FusionLSSFPN(BaseLSSFPN):\n\n    def _configure_depth_net(self, depth_net_conf):\n        return DepthNet(\n            depth_net_conf['in_channels'],\n            depth_net_conf['mid_channels'],\n            self.output_channels,\n            self.depth_channels,\n        )\n\n    def _forward_depth_net(self, feat, mats_dict, lidar_depth):\n        return self.depth_net(feat, mats_dict, lidar_depth)\n\n    def 
_forward_single_sweep(self,\n                              sweep_index,\n                              sweep_imgs,\n                              mats_dict,\n                              sweep_lidar_depth,\n                              is_return_depth=False):\n        \"\"\"Forward function for single sweep.\n\n        Args:\n            sweep_index (int): Index of sweeps.\n            sweep_imgs (Tensor): Input images.\n            mats_dict (dict):\n                sensor2ego_mats(Tensor): Transformation matrix from\n                    camera to ego with shape of (B, num_sweeps,\n                    num_cameras, 4, 4).\n                intrin_mats(Tensor): Intrinsic matrix with shape\n                    of (B, num_sweeps, num_cameras, 4, 4).\n                ida_mats(Tensor): Transformation matrix for ida with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                sensor2sensor_mats(Tensor): Transformation matrix\n                    from key frame camera to sweep frame camera with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                bda_mat(Tensor): Rotation matrix for bda with shape\n                    of (B, 4, 4).\n            sweep_lidar_depth (Tensor): Depth generated by lidar.\n            is_return_depth (bool, optional): Whether to return depth.\n                Default: False.\n\n        Returns:\n            Tensor: BEV feature map.\n        \"\"\"\n        batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n            img_width = sweep_imgs.shape\n        img_feats = self.get_cam_feats(sweep_imgs)\n        sweep_lidar_depth = sweep_lidar_depth.reshape(\n            batch_size * num_cams, *sweep_lidar_depth.shape[2:])\n        source_features = img_feats[:, 0, ...]\n        depth_feature = self._forward_depth_net(\n            source_features.reshape(batch_size * num_cams,\n                                    source_features.shape[2],\n                                    source_features.shape[3],\n                                    source_features.shape[4]), mats_dict,\n            sweep_lidar_depth)\n        depth = depth_feature[:, :self.depth_channels].softmax(\n            dim=1, dtype=depth_feature.dtype)\n        geom_xyz = self.get_geometry(\n            mats_dict['sensor2ego_mats'][:, sweep_index, ...],\n            mats_dict['intrin_mats'][:, sweep_index, ...],\n            mats_dict['ida_mats'][:, sweep_index, ...],\n            mats_dict.get('bda_mat', None),\n        )\n        geom_xyz = ((geom_xyz - (self.voxel_coord - self.voxel_size / 2.0)) /\n                    self.voxel_size).int()\n        if self.training or self.use_da:\n            img_feat_with_depth = depth.unsqueeze(\n                1) * depth_feature[:, self.depth_channels:(\n                    self.depth_channels + self.output_channels)].unsqueeze(2)\n\n            img_feat_with_depth = self._forward_voxel_net(img_feat_with_depth)\n\n            img_feat_with_depth = img_feat_with_depth.reshape(\n                batch_size,\n                num_cams,\n                img_feat_with_depth.shape[1],\n                img_feat_with_depth.shape[2],\n                img_feat_with_depth.shape[3],\n                img_feat_with_depth.shape[4],\n            )\n\n            img_feat_with_depth = img_feat_with_depth.permute(0, 1, 3, 4, 5, 2)\n\n            feature_map = voxel_pooling_train(geom_xyz,\n                                              img_feat_with_depth.contiguous(),\n                                              
self.voxel_num.cuda())\n        else:\n            feature_map = voxel_pooling_inference(\n                geom_xyz, depth, depth_feature[:, self.depth_channels:(\n                    self.depth_channels + self.output_channels)].contiguous(),\n                self.voxel_num.cuda())\n        if is_return_depth:\n            return feature_map.contiguous(), depth.float()\n        return feature_map.contiguous()\n\n    def forward(self,\n                sweep_imgs,\n                mats_dict,\n                lidar_depth,\n                timestamps=None,\n                is_return_depth=False):\n        \"\"\"Forward function.\n\n        Args:\n            sweep_imgs(Tensor): Input images with shape of (B, num_sweeps,\n                num_cameras, 3, H, W).\n            mats_dict(dict):\n                sensor2ego_mats(Tensor): Transformation matrix from\n                    camera to ego with shape of (B, num_sweeps,\n                    num_cameras, 4, 4).\n                intrin_mats(Tensor): Intrinsic matrix with shape\n                    of (B, num_sweeps, num_cameras, 4, 4).\n                ida_mats(Tensor): Transformation matrix for ida with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                sensor2sensor_mats(Tensor): Transformation matrix\n                    from key frame camera to sweep frame camera with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                bda_mat(Tensor): Rotation matrix for bda with shape\n                    of (B, 4, 4).\n            lidar_depth (Tensor): Depth generated by lidar.\n            timestamps(Tensor): Timestamp for all images with the shape of(B,\n                num_sweeps, num_cameras).\n\n        Return:\n            Tensor: bev feature map.\n        \"\"\"\n        batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n            img_width = sweep_imgs.shape\n        lidar_depth = self.get_downsampled_lidar_depth(lidar_depth)\n        key_frame_res = self._forward_single_sweep(\n            0,\n            sweep_imgs[:, 0:1, ...],\n            mats_dict,\n            lidar_depth[:, 0, ...],\n            is_return_depth=is_return_depth)\n        if num_sweeps == 1:\n            return key_frame_res\n\n        key_frame_feature = key_frame_res[\n            0] if is_return_depth else key_frame_res\n\n        ret_feature_list = [key_frame_feature]\n        for sweep_index in range(1, num_sweeps):\n            with torch.no_grad():\n                feature_map = self._forward_single_sweep(\n                    sweep_index,\n                    sweep_imgs[:, sweep_index:sweep_index + 1, ...],\n                    mats_dict,\n                    lidar_depth[:, sweep_index, ...],\n                    is_return_depth=False)\n                ret_feature_list.append(feature_map)\n\n        if is_return_depth:\n            return torch.cat(ret_feature_list, 1), key_frame_res[1]\n        else:\n            return torch.cat(ret_feature_list, 1)\n\n    def get_downsampled_lidar_depth(self, lidar_depth):\n        batch_size, num_sweeps, num_cams, height, width = lidar_depth.shape\n        lidar_depth = lidar_depth.view(\n            batch_size * num_sweeps * num_cams,\n            height // self.downsample_factor,\n            self.downsample_factor,\n            width // self.downsample_factor,\n            self.downsample_factor,\n            1,\n        )\n        lidar_depth = lidar_depth.permute(0, 1, 3, 5, 2, 4).contiguous()\n        lidar_depth = lidar_depth.view(\n            
-1, self.downsample_factor * self.downsample_factor)\n        gt_depths_tmp = torch.where(lidar_depth == 0.0, lidar_depth.max(),\n                                    lidar_depth)\n        lidar_depth = torch.min(gt_depths_tmp, dim=-1).values\n        lidar_depth = lidar_depth.view(batch_size, num_sweeps, num_cams, 1,\n                                       height // self.downsample_factor,\n                                       width // self.downsample_factor)\n        lidar_depth = lidar_depth / self.d_bound[1]\n        return lidar_depth\n"
  },
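`FusionLSSFPN.get_downsampled_lidar_depth` above shrinks the sparse lidar depth map by keeping, in every `downsample_factor x downsample_factor` patch, the nearest non-zero return: zeros mean "no lidar hit" and are temporarily replaced by the map's maximum so that the `min` ignores them, and the result is normalised by the far depth bound. The following standalone sketch reproduces that pooling on a made-up 4x4 map; `downsample_factor` and `d_max` stand in for `self.downsample_factor` and `self.d_bound[1]`.

```python
import torch

downsample_factor, d_max = 2, 58.0
depth = torch.tensor([[0.0, 12.0, 0.0, 0.0],
                      [30.0, 0.0, 0.0, 7.0],
                      [0.0, 0.0, 5.0, 0.0],
                      [0.0, 9.0, 0.0, 6.0]]).view(1, 1, 1, 4, 4)  # (B, sweeps, cams, H, W)

b, s, n, h, w = depth.shape
patches = depth.view(b * s * n, h // downsample_factor, downsample_factor,
                     w // downsample_factor, downsample_factor, 1)
patches = patches.permute(0, 1, 3, 5, 2, 4).reshape(
    -1, downsample_factor * downsample_factor)
# Missing returns (0.0) are pushed to the global max so they never win the min.
patches = torch.where(patches == 0.0, patches.max(), patches)
pooled = patches.min(dim=-1).values.view(b, s, n, 1, h // downsample_factor,
                                         w // downsample_factor)
pooled = pooled / d_max  # normalise, as the original method does with d_bound[1]
print(pooled.squeeze() * d_max)  # tensor([[12., 7.], [9., 5.]])
```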
  {
    "path": "bevdepth/layers/backbones/matrixvt.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport torch\nfrom torch import nn\nfrom torch.cuda.amp import autocast\n\nfrom bevdepth.layers.backbones.base_lss_fpn import BaseLSSFPN\n\n\nclass HoriConv(nn.Module):\n\n    def __init__(self, in_channels, mid_channels, out_channels, cat_dim=0):\n        \"\"\"HoriConv that reduce the image feature\n            in height dimension and refine it.\n\n        Args:\n            in_channels (int): in_channels\n            mid_channels (int): mid_channels\n            out_channels (int): output channels\n            cat_dim (int, optional): channels of position\n                embedding. Defaults to 0.\n        \"\"\"\n        super().__init__()\n\n        self.merger = nn.Sequential(\n            nn.Conv2d(in_channels + cat_dim,\n                      in_channels,\n                      kernel_size=1,\n                      bias=True),\n            nn.Sigmoid(),\n            nn.Conv2d(in_channels, in_channels, kernel_size=1, bias=True),\n        )\n\n        self.reduce_conv = nn.Sequential(\n            nn.Conv1d(\n                in_channels,\n                mid_channels,\n                kernel_size=3,\n                stride=1,\n                padding=1,\n                bias=False,\n            ),\n            nn.BatchNorm1d(mid_channels),\n            nn.ReLU(inplace=True),\n        )\n\n        self.conv1 = nn.Sequential(\n            nn.Conv1d(\n                mid_channels,\n                mid_channels,\n                kernel_size=3,\n                stride=1,\n                padding=1,\n                bias=False,\n            ),\n            nn.BatchNorm1d(mid_channels),\n            nn.ReLU(inplace=True),\n            nn.Conv1d(\n                mid_channels,\n                mid_channels,\n                kernel_size=3,\n                stride=1,\n                padding=1,\n                bias=False,\n            ),\n            nn.BatchNorm1d(mid_channels),\n            nn.ReLU(inplace=True),\n        )\n\n        self.conv2 = nn.Sequential(\n            nn.Conv1d(\n                mid_channels,\n                mid_channels,\n                kernel_size=3,\n                stride=1,\n                padding=1,\n                bias=False,\n            ),\n            nn.BatchNorm1d(mid_channels),\n            nn.ReLU(inplace=True),\n            nn.Conv1d(\n                mid_channels,\n                mid_channels,\n                kernel_size=3,\n                stride=1,\n                padding=1,\n                bias=False,\n            ),\n            nn.BatchNorm1d(mid_channels),\n            nn.ReLU(inplace=True),\n        )\n\n        self.out_conv = nn.Sequential(\n            nn.Conv1d(\n                mid_channels,\n                out_channels,\n                kernel_size=3,\n                stride=1,\n                padding=1,\n                bias=True,\n            ),\n            nn.BatchNorm1d(out_channels),\n            nn.ReLU(inplace=True),\n        )\n\n    @autocast(False)\n    def forward(self, x, pe=None):\n        # [N,C,H,W]\n        if pe is not None:\n            x = self.merger(torch.cat([x, pe], 1))\n        else:\n            x = self.merger(x)\n        x = x.max(2)[0]\n        x = self.reduce_conv(x)\n        x = self.conv1(x) + x\n        x = self.conv2(x) + x\n        x = self.out_conv(x)\n        return x\n\n\nclass DepthReducer(nn.Module):\n\n    def __init__(self, img_channels, mid_channels):\n        \"\"\"Module that compresses the predicted\n            
categorical depth in height dimension\n\n        Args:\n            img_channels (int): in_channels\n            mid_channels (int): mid_channels\n        \"\"\"\n        super().__init__()\n        self.vertical_weighter = nn.Sequential(\n            nn.Conv2d(img_channels,\n                      mid_channels,\n                      kernel_size=3,\n                      stride=1,\n                      padding=1),\n            nn.BatchNorm2d(mid_channels),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(mid_channels, 1, kernel_size=3, stride=1, padding=1),\n        )\n\n    @autocast(False)\n    def forward(self, feat, depth):\n        vert_weight = self.vertical_weighter(feat).softmax(2)  # [N,1,H,W]\n        depth = (depth * vert_weight).sum(2)\n        return depth\n\n\n# NOTE Modified Lift-Splat\nclass MatrixVT(BaseLSSFPN):\n\n    def __init__(\n        self,\n        x_bound,\n        y_bound,\n        z_bound,\n        d_bound,\n        final_dim,\n        downsample_factor,\n        output_channels,\n        img_backbone_conf,\n        img_neck_conf,\n        depth_net_conf,\n    ):\n        \"\"\"Modified from LSSFPN.\n\n        Args:\n            x_bound (list): Boundaries for x.\n            y_bound (list): Boundaries for y.\n            z_bound (list): Boundaries for z.\n            d_bound (list): Boundaries for d.\n            final_dim (list): Dimension for input images.\n            downsample_factor (int): Downsample factor between feature map\n                and input image.\n            output_channels (int): Number of channels for the output\n                feature map.\n            img_backbone_conf (dict): Config for image backbone.\n            img_neck_conf (dict): Config for image neck.\n            depth_net_conf (dict): Config for depth net.\n        \"\"\"\n        super().__init__(\n            x_bound,\n            y_bound,\n            z_bound,\n            d_bound,\n            final_dim,\n            downsample_factor,\n            output_channels,\n            img_backbone_conf,\n            img_neck_conf,\n            depth_net_conf,\n            use_da=False,\n        )\n\n        self.register_buffer('bev_anchors',\n                             self.create_bev_anchors(x_bound, y_bound))\n        self.horiconv = HoriConv(self.output_channels, 512,\n                                 self.output_channels)\n        self.depth_reducer = DepthReducer(self.output_channels,\n                                          self.output_channels)\n        self.static_mat = None\n\n    def create_bev_anchors(self, x_bound, y_bound, ds_rate=1):\n        \"\"\"Create anchors in BEV space\n\n        Args:\n            x_bound (list): xbound in meters [start, end, step]\n            y_bound (list): ybound in meters [start, end, step]\n            ds_rate (int, optional): downsample rate. 
Defaults to 1.\n\n        Returns:\n            anchors: anchors in [W, H, 2]\n        \"\"\"\n        x_coords = ((torch.linspace(\n            x_bound[0],\n            x_bound[1] - x_bound[2] * ds_rate,\n            self.voxel_num[0] // ds_rate,\n            dtype=torch.float,\n        ) + x_bound[2] * ds_rate / 2).view(self.voxel_num[0] // ds_rate,\n                                           1).expand(\n                                               self.voxel_num[0] // ds_rate,\n                                               self.voxel_num[1] // ds_rate))\n        y_coords = ((torch.linspace(\n            y_bound[0],\n            y_bound[1] - y_bound[2] * ds_rate,\n            self.voxel_num[1] // ds_rate,\n            dtype=torch.float,\n        ) + y_bound[2] * ds_rate / 2).view(\n            1,\n            self.voxel_num[1] // ds_rate).expand(self.voxel_num[0] // ds_rate,\n                                                 self.voxel_num[1] // ds_rate))\n\n        anchors = torch.stack([x_coords, y_coords]).permute(1, 2, 0)\n        return anchors\n\n    def get_proj_mat(self, mats_dict=None):\n        \"\"\"Create the Ring Matrix and Ray Matrix\n\n        Args:\n            mats_dict (dict, optional): dictionary that\n                contains intrin- and extrin- parameters.\n            Defaults to None.\n\n        Returns:\n            tuple: Ring Matrix in [B, D, L, L] and Ray Matrix in [B, W, L, L]\n        \"\"\"\n        if self.static_mat is not None:\n            return self.static_mat\n\n        bev_size = int(self.voxel_num[0])  # only consider square BEV\n        geom_sep = self.get_geometry(\n            mats_dict['sensor2ego_mats'][:, 0, ...],\n            mats_dict['intrin_mats'][:, 0, ...],\n            mats_dict['ida_mats'][:, 0, ...],\n            mats_dict.get('bda_mat', None),\n        )\n        geom_sep = (\n            geom_sep -\n            (self.voxel_coord - self.voxel_size / 2.0)) / self.voxel_size\n        geom_sep = geom_sep.mean(3).permute(0, 1, 3, 2,\n                                            4).contiguous()  # B,Ncam,W,D,2\n        B, Nc, W, D, _ = geom_sep.shape\n        geom_sep = geom_sep.long().view(B, Nc * W, D, -1)[..., :2]\n\n        invalid1 = torch.logical_or((geom_sep < 0)[..., 0], (geom_sep < 0)[...,\n                                                                           1])\n        invalid2 = torch.logical_or((geom_sep > (bev_size - 1))[..., 0],\n                                    (geom_sep > (bev_size - 1))[..., 1])\n        geom_sep[(invalid1 | invalid2)] = int(bev_size / 2)\n        geom_idx = geom_sep[..., 1] * bev_size + geom_sep[..., 0]\n\n        geom_uni = self.bev_anchors[None].repeat([B, 1, 1, 1])  # B,128,128,2\n        B, L, L, _ = geom_uni.shape\n\n        circle_map = geom_uni.new_zeros((B, D, L * L))\n\n        ray_map = geom_uni.new_zeros((B, Nc * W, L * L))\n        for b in range(B):\n            for dir in range(Nc * W):\n                ray_map[b, dir, geom_idx[b, dir]] += 1\n            for d in range(D):\n                circle_map[b, d, geom_idx[b, :, d]] += 1\n        null_point = int((bev_size / 2) * (bev_size + 1))\n        circle_map[..., null_point] = 0\n        ray_map[..., null_point] = 0\n        circle_map = circle_map.view(B, D, L * L)\n        ray_map = ray_map.view(B, -1, L * L)\n        circle_map /= circle_map.max(1)[0].clip(min=1)[:, None]\n        ray_map /= ray_map.max(1)[0].clip(min=1)[:, None]\n\n        return circle_map, ray_map\n\n    @autocast(False)\n    def reduce_and_project(self, 
feature, depth, mats_dict):\n        \"\"\"reduce the feature and depth in height\n            dimension and make BEV feature\n\n        Args:\n            feature (Tensor): image feature in [B, C, H, W]\n            depth (Tensor): Depth Prediction in [B, D, H, W]\n            mats_dict (dict): dictionary that contains intrin-\n                and extrin- parameters\n\n        Returns:\n            Tensor: BEV feature in B, C, L, L\n        \"\"\"\n        # [N,112,H,W], [N,256,H,W]\n        depth = self.depth_reducer(feature, depth)\n\n        B = mats_dict['intrin_mats'].shape[0]\n\n        # N, C, H, W = feature.shape\n        # feature=feature.reshape(N,C*H,W)\n        feature = self.horiconv(feature)\n        # feature = feature.max(2)[0]\n        # [N.112,W], [N,C,W]\n        depth = depth.permute(0, 2, 1).reshape(B, -1, self.depth_channels)\n        feature = feature.permute(0, 2, 1).reshape(B, -1, self.output_channels)\n        circle_map, ray_map = self.get_proj_mat(mats_dict)\n\n        proj_mat = depth.matmul(circle_map)\n        proj_mat = (proj_mat * ray_map).permute(0, 2, 1)\n        img_feat_with_depth = proj_mat.matmul(feature)\n        img_feat_with_depth = img_feat_with_depth.permute(0, 2, 1).reshape(\n            B, -1, *self.voxel_num[:2])\n\n        return img_feat_with_depth\n\n    def _forward_single_sweep(self,\n                              sweep_index,\n                              sweep_imgs,\n                              mats_dict,\n                              is_return_depth=False):\n        (\n            batch_size,\n            num_sweeps,\n            num_cams,\n            num_channels,\n            img_height,\n            img_width,\n        ) = sweep_imgs.shape\n        img_feats = self.get_cam_feats(sweep_imgs)\n        source_features = img_feats[:, 0, ...]\n        depth_feature = self.depth_net(\n            source_features.reshape(\n                batch_size * num_cams,\n                source_features.shape[2],\n                source_features.shape[3],\n                source_features.shape[4],\n            ),\n            mats_dict,\n        )\n        with autocast(enabled=False):\n            feature = depth_feature[:, self.depth_channels:(\n                self.depth_channels + self.output_channels)].float()\n            depth = depth_feature[:, :self.depth_channels].float().softmax(1)\n\n            img_feat_with_depth = self.reduce_and_project(\n                feature, depth, mats_dict)  # [b*n, c, d, w]\n\n            if is_return_depth:\n                return img_feat_with_depth.contiguous(), depth\n            return img_feat_with_depth.contiguous()\n\n\nif __name__ == '__main__':\n    backbone_conf = {\n        'x_bound': [-51.2, 51.2, 0.8],  # BEV grids bounds and size (m)\n        'y_bound': [-51.2, 51.2, 0.8],  # BEV grids bounds and size (m)\n        'z_bound': [-5, 3, 8],  # BEV grids bounds and size (m)\n        'd_bound': [2.0, 58.0,\n                    0.5],  # Categorical Depth bounds and division (m)\n        'final_dim': (256, 704),  # img size for model input (pix)\n        'output_channels':\n        80,  # BEV feature channels\n        'downsample_factor':\n        16,  # ds factor of the feature to be projected to BEV (e.g. 
256x704 -> 16x44)  # noqa\n        'img_backbone_conf':\n        dict(\n            type='ResNet',\n            depth=50,\n            frozen_stages=0,\n            out_indices=[0, 1, 2, 3],\n            norm_eval=False,\n            init_cfg=dict(type='Pretrained',\n                          checkpoint='torchvision://resnet50'),\n        ),\n        'img_neck_conf':\n        dict(\n            type='SECONDFPN',\n            in_channels=[256, 512, 1024, 2048],\n            upsample_strides=[0.25, 0.5, 1, 2],\n            out_channels=[128, 128, 128, 128],\n        ),\n        'depth_net_conf':\n        dict(in_channels=512, mid_channels=512),\n    }\n\n    model = MatrixVT(**backbone_conf)\n    # for inference and deployment where intrin & extrin mats are static\n    # model.static_mat = model.get_proj_mat(mats_dict)\n\n    bev_feature, depth = model(\n        torch.rand((2, 1, 6, 3, 256, 704)), {\n            'sensor2ego_mats': torch.rand((2, 1, 6, 4, 4)),\n            'intrin_mats': torch.rand((2, 1, 6, 4, 4)),\n            'ida_mats': torch.rand((2, 1, 6, 4, 4)),\n            'sensor2sensor_mats': torch.rand((2, 1, 6, 4, 4)),\n            'bda_mat': torch.rand((2, 4, 4)),\n        },\n        is_return_depth=True)\n\n    print(bev_feature.shape, depth.shape)\n"
  },
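The heart of `MatrixVT.reduce_and_project` above is two batched matrix products: the height-reduced depth is multiplied by the "Ring Matrix" (`circle_map`, depth bin to BEV cell), gated elementwise by the "Ray Matrix" (`ray_map`, image column to BEV cell), and the result splats the height-reduced image features onto the BEV grid. The sketch below only traces the tensor shapes of that pipeline with random data and made-up sizes; in the real model the two matrices come from `get_proj_mat`.

```python
import torch

B, NcamW, D, C, L = 2, 6 * 44, 112, 80, 128   # batch, cams*width, depth bins, channels, BEV side
depth = torch.rand(B, NcamW, D)       # height-reduced depth per image column
feature = torch.rand(B, NcamW, C)     # height-reduced image feature per column
circle_map = torch.rand(B, D, L * L)  # "Ring Matrix": depth bin -> BEV cell
ray_map = torch.rand(B, NcamW, L * L) # "Ray Matrix": image column -> BEV cell

proj = depth.matmul(circle_map)            # (B, NcamW, L*L)
proj = (proj * ray_map).permute(0, 2, 1)   # keep cells hit by both maps -> (B, L*L, NcamW)
bev = proj.matmul(feature)                 # splat the features           -> (B, L*L, C)
bev = bev.permute(0, 2, 1).reshape(B, C, L, L)
print(bev.shape)  # torch.Size([2, 80, 128, 128])
```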
  {
    "path": "bevdepth/layers/heads/__init__.py",
    "content": "from .bev_depth_head import BEVDepthHead\n\n__all__ = ['BEVDepthHead']\n"
  },
  {
    "path": "bevdepth/layers/heads/bev_depth_head.py",
    "content": "\"\"\"Inherited from `https://github.com/open-mmlab/mmdetection3d/blob/master/mmdet3d/models/dense_heads/centerpoint_head.py`\"\"\"  # noqa\nimport numba\nimport numpy as np\nimport torch\nfrom mmdet3d.core import draw_heatmap_gaussian, gaussian_radius\nfrom mmdet3d.models import build_neck\nfrom mmdet3d.models.dense_heads.centerpoint_head import CenterHead, circle_nms\nfrom mmdet3d.models.utils import clip_sigmoid\nfrom mmdet.core import reduce_mean\nfrom mmdet.models import build_backbone\nfrom torch.cuda.amp import autocast\n\n__all__ = ['BEVDepthHead']\n\nbev_backbone_conf = dict(\n    type='ResNet',\n    in_channels=80,\n    depth=18,\n    num_stages=3,\n    strides=(1, 2, 2),\n    dilations=(1, 1, 1),\n    out_indices=[0, 1, 2],\n    norm_eval=False,\n    base_channels=160,\n)\n\nbev_neck_conf = dict(type='SECONDFPN',\n                     in_channels=[160, 320, 640],\n                     upsample_strides=[2, 4, 8],\n                     out_channels=[64, 64, 128])\n\n\n@numba.jit(nopython=True)\ndef size_aware_circle_nms(dets, thresh_scale, post_max_size=83):\n    \"\"\"Circular NMS.\n    An object is only counted as positive if no other center\n    with a higher confidence exists within a radius r using a\n    bird-eye view distance metric.\n    Args:\n        dets (torch.Tensor): Detection results with the shape of [N, 3].\n        thresh (float): Value of threshold.\n        post_max_size (int): Max number of prediction to be kept. Defaults\n            to 83\n    Returns:\n        torch.Tensor: Indexes of the detections to be kept.\n    \"\"\"\n    x1 = dets[:, 0]\n    y1 = dets[:, 1]\n    dx1 = dets[:, 2]\n    dy1 = dets[:, 3]\n    yaws = dets[:, 4]\n    scores = dets[:, -1]\n    order = scores.argsort()[::-1].astype(np.int32)  # highest->lowest\n    ndets = dets.shape[0]\n    suppressed = np.zeros((ndets), dtype=np.int32)\n    keep = []\n    for _i in range(ndets):\n        i = order[_i]  # start with highest score box\n        if suppressed[\n                i] == 1:  # if any box have enough iou with this, remove it\n            continue\n        keep.append(i)\n        for _j in range(_i + 1, ndets):\n            j = order[_j]\n            if suppressed[j] == 1:\n                continue\n            # calculate center distance between i and j box\n            dist_x = abs(x1[i] - x1[j])\n            dist_y = abs(y1[i] - y1[j])\n            dist_x_th = (abs(dx1[i] * np.cos(yaws[i])) +\n                         abs(dx1[j] * np.cos(yaws[j])) +\n                         abs(dy1[i] * np.sin(yaws[i])) +\n                         abs(dy1[j] * np.sin(yaws[j])))\n            dist_y_th = (abs(dx1[i] * np.sin(yaws[i])) +\n                         abs(dx1[j] * np.sin(yaws[j])) +\n                         abs(dy1[i] * np.cos(yaws[i])) +\n                         abs(dy1[j] * np.cos(yaws[j])))\n            # ovr = inter / areas[j]\n            if dist_x <= dist_x_th * thresh_scale / 2 and \\\n               dist_y <= dist_y_th * thresh_scale / 2:\n                suppressed[j] = 1\n    return keep[:post_max_size]\n\n\nclass BEVDepthHead(CenterHead):\n    \"\"\"Head for BevDepth.\n\n    Args:\n        in_channels(int): Number of channels after bev_neck.\n        tasks(dict): Tasks for head.\n        bbox_coder(dict): Config of bbox coder.\n        common_heads(dict): Config of head for each task.\n        loss_cls(dict): Config of classification loss.\n        loss_bbox(dict): Config of regression loss.\n        gaussian_overlap(float): Gaussian overlap used for 
`get_targets`.\n        min_radius(int): Min radius used for `get_targets`.\n        train_cfg(dict): Config used in the training process.\n        test_cfg(dict): Config used in the test process.\n        bev_backbone_conf(dict): Config of bev_backbone.\n        bev_neck_conf(dict): Config of bev_neck.\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels=256,\n        tasks=None,\n        bbox_coder=None,\n        common_heads=dict(),\n        loss_cls=dict(type='GaussianFocalLoss', reduction='mean'),\n        loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25),\n        gaussian_overlap=0.1,\n        min_radius=2,\n        train_cfg=None,\n        test_cfg=None,\n        bev_backbone_conf=bev_backbone_conf,\n        bev_neck_conf=bev_neck_conf,\n        separate_head=dict(type='SeparateHead',\n                           init_bias=-2.19,\n                           final_kernel=3),\n    ):\n        super(BEVDepthHead, self).__init__(\n            in_channels=in_channels,\n            tasks=tasks,\n            bbox_coder=bbox_coder,\n            common_heads=common_heads,\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            separate_head=separate_head,\n        )\n        self.trunk = build_backbone(bev_backbone_conf)\n        self.trunk.init_weights()\n        self.neck = build_neck(bev_neck_conf)\n        self.neck.init_weights()\n        del self.trunk.maxpool\n        self.gaussian_overlap = gaussian_overlap\n        self.min_radius = min_radius\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    @autocast(False)\n    def forward(self, x):\n        \"\"\"Forward pass.\n\n        Args:\n            x (Tensor): Input BEV feature map.\n\n        Returns:\n            tuple(list[dict]): Output results for tasks.\n        \"\"\"\n        x = x.float()\n        # FPN\n        trunk_outs = [x]\n        if self.trunk.deep_stem:\n            x = self.trunk.stem(x)\n        else:\n            x = self.trunk.conv1(x)\n            x = self.trunk.norm1(x)\n            x = self.trunk.relu(x)\n        for i, layer_name in enumerate(self.trunk.res_layers):\n            res_layer = getattr(self.trunk, layer_name)\n            x = res_layer(x)\n            if i in self.trunk.out_indices:\n                trunk_outs.append(x)\n        fpn_output = self.neck(trunk_outs)\n        ret_values = super().forward(fpn_output)\n        return ret_values\n\n    def get_targets_single(self, gt_bboxes_3d, gt_labels_3d):\n        \"\"\"Generate training targets for a single sample.\n\n        Args:\n            gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n            gt_labels_3d (torch.Tensor): Labels of boxes.\n\n        Returns:\n            tuple[list[torch.Tensor]]: Tuple of target including \\\n                the following results in order.\n\n                - list[torch.Tensor]: Heatmap scores.\n                - list[torch.Tensor]: Ground truth boxes.\n                - list[torch.Tensor]: Indexes indicating the position \\\n                    of the valid boxes.\n                - list[torch.Tensor]: Masks indicating which boxes \\\n                    are valid.\n        \"\"\"\n        max_objs = self.train_cfg['max_objs'] * self.train_cfg['dense_reg']\n        grid_size = torch.tensor(self.train_cfg['grid_size'])\n        pc_range = torch.tensor(self.train_cfg['point_cloud_range'])\n        voxel_size = 
torch.tensor(self.train_cfg['voxel_size'])\n\n        feature_map_size = grid_size[:2] // self.train_cfg['out_size_factor']\n\n        # reorganize the gt_dict by tasks\n        task_masks = []\n        flag = 0\n        for class_name in self.class_names:\n            task_masks.append([\n                torch.where(gt_labels_3d == class_name.index(i) + flag)\n                for i in class_name\n            ])\n            flag += len(class_name)\n\n        task_boxes = []\n        task_classes = []\n        flag2 = 0\n        for idx, mask in enumerate(task_masks):\n            task_box = []\n            task_class = []\n            for m in mask:\n                task_box.append(gt_bboxes_3d[m])\n                # 0 is background for each task, so we need to add 1 here.\n                task_class.append(gt_labels_3d[m] + 1 - flag2)\n            task_boxes.append(\n                torch.cat(task_box, axis=0).to(gt_bboxes_3d.device))\n            task_classes.append(\n                torch.cat(task_class).long().to(gt_bboxes_3d.device))\n            flag2 += len(mask)\n        draw_gaussian = draw_heatmap_gaussian\n        heatmaps, anno_boxes, inds, masks = [], [], [], []\n\n        for idx, task_head in enumerate(self.task_heads):\n            heatmap = gt_bboxes_3d.new_zeros(\n                (len(self.class_names[idx]), feature_map_size[1],\n                 feature_map_size[0]),\n                device='cuda')\n\n            anno_box = gt_bboxes_3d.new_zeros(\n                (max_objs, len(self.train_cfg['code_weights'])),\n                dtype=torch.float32,\n                device='cuda')\n\n            ind = gt_labels_3d.new_zeros((max_objs),\n                                         dtype=torch.int64,\n                                         device='cuda')\n            mask = gt_bboxes_3d.new_zeros((max_objs),\n                                          dtype=torch.uint8,\n                                          device='cuda')\n\n            num_objs = min(task_boxes[idx].shape[0], max_objs)\n\n            for k in range(num_objs):\n                cls_id = task_classes[idx][k] - 1\n\n                width = task_boxes[idx][k][3]\n                length = task_boxes[idx][k][4]\n                width = width / voxel_size[0] / self.train_cfg[\n                    'out_size_factor']\n                length = length / voxel_size[1] / self.train_cfg[\n                    'out_size_factor']\n\n                if width > 0 and length > 0:\n                    radius = gaussian_radius(\n                        (length, width),\n                        min_overlap=self.train_cfg['gaussian_overlap'])\n                    radius = max(self.train_cfg['min_radius'], int(radius))\n\n                    # be really careful for the coordinate system of\n                    # your box annotation.\n                    x, y, z = task_boxes[idx][k][0], task_boxes[idx][k][\n                        1], task_boxes[idx][k][2]\n\n                    coor_x = (\n                        x - pc_range[0]\n                    ) / voxel_size[0] / self.train_cfg['out_size_factor']\n                    coor_y = (\n                        y - pc_range[1]\n                    ) / voxel_size[1] / self.train_cfg['out_size_factor']\n\n                    center = torch.tensor([coor_x, coor_y],\n                                          dtype=torch.float32,\n                                          device='cuda')\n                    center_int = center.to(torch.int32)\n\n                    # throw out not in 
range objects to avoid out of array\n                    # area when creating the heatmap\n                    if not (0 <= center_int[0] < feature_map_size[0]\n                            and 0 <= center_int[1] < feature_map_size[1]):\n                        continue\n\n                    draw_gaussian(heatmap[cls_id], center_int, radius)\n\n                    new_idx = k\n                    x, y = center_int[0], center_int[1]\n\n                    assert y * feature_map_size[0] + x < feature_map_size[\n                        0] * feature_map_size[1]\n\n                    ind[new_idx] = y * feature_map_size[0] + x\n                    mask[new_idx] = 1\n                    # TODO: support other outdoor dataset\n                    if len(task_boxes[idx][k]) > 7:\n                        vx, vy = task_boxes[idx][k][7:]\n                    rot = task_boxes[idx][k][6]\n                    box_dim = task_boxes[idx][k][3:6]\n                    if self.norm_bbox:\n                        box_dim = box_dim.log()\n                    if len(task_boxes[idx][k]) > 7:\n                        anno_box[new_idx] = torch.cat([\n                            center - torch.tensor([x, y], device='cuda'),\n                            z.unsqueeze(0),\n                            box_dim,\n                            torch.sin(rot).unsqueeze(0),\n                            torch.cos(rot).unsqueeze(0),\n                            vx.unsqueeze(0),\n                            vy.unsqueeze(0),\n                        ])\n                    else:\n                        anno_box[new_idx] = torch.cat([\n                            center - torch.tensor([x, y], device='cuda'),\n                            z.unsqueeze(0), box_dim,\n                            torch.sin(rot).unsqueeze(0),\n                            torch.cos(rot).unsqueeze(0)\n                        ])\n\n            heatmaps.append(heatmap)\n            anno_boxes.append(anno_box)\n            masks.append(mask)\n            inds.append(ind)\n        return heatmaps, anno_boxes, inds, masks\n\n    def loss(self, targets, preds_dicts, **kwargs):\n        \"\"\"Loss function for BEVDepthHead.\n\n        Args:\n            gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n                truth gt boxes.\n            gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n            preds_dicts (dict): Output of forward function.\n\n        Returns:\n            dict[str:torch.Tensor]: Loss of heatmap and bbox of each task.\n        \"\"\"\n        heatmaps, anno_boxes, inds, masks = targets\n        return_loss = 0\n        for task_id, preds_dict in enumerate(preds_dicts):\n            # heatmap focal loss\n            preds_dict[0]['heatmap'] = clip_sigmoid(preds_dict[0]['heatmap'])\n            num_pos = heatmaps[task_id].eq(1).float().sum().item()\n            cls_avg_factor = torch.clamp(reduce_mean(\n                heatmaps[task_id].new_tensor(num_pos)),\n                                         min=1).item()\n            loss_heatmap = self.loss_cls(preds_dict[0]['heatmap'],\n                                         heatmaps[task_id],\n                                         avg_factor=cls_avg_factor)\n            target_box = anno_boxes[task_id]\n            # reconstruct the anno_box from multiple reg heads\n            if 'vel' in preds_dict[0].keys():\n                preds_dict[0]['anno_box'] = torch.cat(\n                    (preds_dict[0]['reg'], preds_dict[0]['height'],\n                     preds_dict[0]['dim'], 
preds_dict[0]['rot'],\n                     preds_dict[0]['vel']),\n                    dim=1,\n                )\n            else:\n                preds_dict[0]['anno_box'] = torch.cat(\n                    (preds_dict[0]['reg'], preds_dict[0]['height'],\n                     preds_dict[0]['dim'], preds_dict[0]['rot']),\n                    dim=1,\n                )\n            # Regression loss for dimension, offset, height, rotation\n            num = masks[task_id].float().sum()\n            ind = inds[task_id]\n            pred = preds_dict[0]['anno_box'].permute(0, 2, 3, 1).contiguous()\n            pred = pred.view(pred.size(0), -1, pred.size(3))\n            pred = self._gather_feat(pred, ind)\n            mask = masks[task_id].unsqueeze(2).expand_as(target_box).float()\n            num = torch.clamp(reduce_mean(target_box.new_tensor(num)),\n                              min=1e-4).item()\n            isnotnan = (~torch.isnan(target_box)).float()\n            mask *= isnotnan\n            code_weights = self.train_cfg['code_weights']\n            bbox_weights = mask * mask.new_tensor(code_weights)\n            loss_bbox = self.loss_bbox(pred,\n                                       target_box,\n                                       bbox_weights,\n                                       avg_factor=num)\n            return_loss += loss_bbox\n            return_loss += loss_heatmap\n        return return_loss\n\n    def get_bboxes(self, preds_dicts, img_metas, img=None, rescale=False):\n        \"\"\"Generate bboxes from bbox head predictions.\n\n        Args:\n            preds_dicts (tuple[list[dict]]): Prediction results.\n            img_metas (list[dict]): Point cloud and image's meta info.\n\n        Returns:\n            list[dict]: Decoded bbox, scores and labels after nms.\n        \"\"\"\n        rets = []\n        for task_id, preds_dict in enumerate(preds_dicts):\n            num_class_with_bg = self.num_classes[task_id]\n            batch_size = preds_dict[0]['heatmap'].shape[0]\n            batch_heatmap = preds_dict[0]['heatmap'].sigmoid()\n\n            batch_reg = preds_dict[0]['reg']\n            batch_hei = preds_dict[0]['height']\n\n            if self.norm_bbox:\n                batch_dim = torch.exp(preds_dict[0]['dim'])\n            else:\n                batch_dim = preds_dict[0]['dim']\n\n            batch_rots = preds_dict[0]['rot'][:, 0].unsqueeze(1)\n            batch_rotc = preds_dict[0]['rot'][:, 1].unsqueeze(1)\n\n            if 'vel' in preds_dict[0]:\n                batch_vel = preds_dict[0]['vel']\n            else:\n                batch_vel = None\n            temp = self.bbox_coder.decode(batch_heatmap,\n                                          batch_rots,\n                                          batch_rotc,\n                                          batch_hei,\n                                          batch_dim,\n                                          batch_vel,\n                                          reg=batch_reg,\n                                          task_id=task_id)\n            assert self.test_cfg['nms_type'] in [\n                'size_aware_circle', 'circle', 'rotate'\n            ]\n            batch_reg_preds = [box['bboxes'] for box in temp]\n            batch_cls_preds = [box['scores'] for box in temp]\n            batch_cls_labels = [box['labels'] for box in temp]\n            if self.test_cfg['nms_type'] == 'circle':\n                ret_task = []\n                for i in range(batch_size):\n                    
boxes3d = temp[i]['bboxes']\n                    scores = temp[i]['scores']\n                    labels = temp[i]['labels']\n                    centers = boxes3d[:, [0, 1]]\n                    boxes = torch.cat([centers, scores.view(-1, 1)], dim=1)\n                    keep = torch.tensor(circle_nms(\n                        boxes.detach().cpu().numpy(),\n                        self.test_cfg['min_radius'][task_id],\n                        post_max_size=self.test_cfg['post_max_size']),\n                                        dtype=torch.long,\n                                        device=boxes.device)\n\n                    boxes3d = boxes3d[keep]\n                    scores = scores[keep]\n                    labels = labels[keep]\n                    ret = dict(bboxes=boxes3d, scores=scores, labels=labels)\n                    ret_task.append(ret)\n                rets.append(ret_task)\n            elif self.test_cfg['nms_type'] == 'size_aware_circle':\n                ret_task = []\n                for i in range(batch_size):\n                    boxes3d = temp[i]['bboxes']\n                    scores = temp[i]['scores']\n                    labels = temp[i]['labels']\n                    boxes_2d = boxes3d[:, [0, 1, 3, 4, 6]]\n                    boxes = torch.cat([boxes_2d, scores.view(-1, 1)], dim=1)\n                    keep = torch.tensor(\n                        size_aware_circle_nms(\n                            boxes.detach().cpu().numpy(),\n                            self.test_cfg['thresh_scale'][task_id],\n                            post_max_size=self.test_cfg['post_max_size'],\n                        ),\n                        dtype=torch.long,\n                        device=boxes.device,\n                    )\n\n                    boxes3d = boxes3d[keep]\n                    scores = scores[keep]\n                    labels = labels[keep]\n                    ret = dict(bboxes=boxes3d, scores=scores, labels=labels)\n                    ret_task.append(ret)\n                rets.append(ret_task)\n            else:\n                rets.append(\n                    self.get_task_detections(num_class_with_bg,\n                                             batch_cls_preds, batch_reg_preds,\n                                             batch_cls_labels, img_metas))\n\n        # Merge branches results\n        num_samples = len(rets[0])\n\n        ret_list = []\n        for i in range(num_samples):\n            for k in rets[0][i].keys():\n                if k == 'bboxes':\n                    bboxes = torch.cat([ret[i][k] for ret in rets])\n                elif k == 'scores':\n                    scores = torch.cat([ret[i][k] for ret in rets])\n                elif k == 'labels':\n                    flag = 0\n                    for j, num_class in enumerate(self.num_classes):\n                        rets[j][i][k] += flag\n                        flag += num_class\n                    labels = torch.cat([ret[i][k].int() for ret in rets])\n            ret_list.append([bboxes, scores, labels])\n        return ret_list\n"
  },
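`size_aware_circle_nms` in `bev_depth_head.py` above suppresses a detection whenever a higher-scoring box lies within a yaw- and size-dependent window of its center. A hypothetical usage sketch follows; it assumes the repo and its numba/mmdet3d dependencies are importable, and every value in `dets` (rows of `[center_x, center_y, dx, dy, yaw, score]`) is made up.

```python
import numpy as np

from bevdepth.layers.heads.bev_depth_head import size_aware_circle_nms

dets = np.array([
    [10.0, 5.0, 4.0, 2.0, 0.0, 0.9],    # kept: highest score
    [10.3, 5.1, 4.0, 2.0, 0.0, 0.6],    # suppressed: inside box 0's size-aware window
    [40.0, -8.0, 4.0, 2.0, 1.57, 0.8],  # kept: far away from everything else
])

keep = size_aware_circle_nms(dets, 0.5)  # thresh_scale=0.5
print(keep)  # expected: [0, 2]
```

The target/loss pair in the same file relies on a flattened-index convention: `get_targets_single` records a ground-truth box at heatmap cell `(x, y)` as `ind = y * W + x`, and `loss` later gathers the predicted regression channels at exactly that index from the flattened `H * W` map. A minimal standalone illustration with made-up sizes, using `torch.gather` in place of `CenterHead._gather_feat`:

```python
import torch

W, H, C = 4, 3, 2                        # feature map width/height, regression channels
pred = torch.arange(H * W * C, dtype=torch.float).view(1, H * W, C)  # flattened prediction map
x, y = 2, 1                              # integer center of one ground-truth box
ind = torch.tensor([[y * W + x]])        # (batch, max_objs), as built in get_targets_single

gathered = torch.gather(pred, 1, ind.unsqueeze(-1).expand(1, 1, C))
assert torch.equal(gathered[0, 0], pred[0, y * W + x])
print(gathered)  # the C channels stored at heatmap cell (x=2, y=1)
```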
  {
    "path": "bevdepth/models/base_bev_depth.py",
    "content": "from torch import nn\n\nfrom bevdepth.layers.backbones.base_lss_fpn import BaseLSSFPN\nfrom bevdepth.layers.heads.bev_depth_head import BEVDepthHead\n\n__all__ = ['BaseBEVDepth']\n\n\nclass BaseBEVDepth(nn.Module):\n    \"\"\"Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.\n\n    Args:\n        backbone_conf (dict): Config of backbone.\n        head_conf (dict): Config of head.\n        is_train_depth (bool): Whether to return depth.\n            Default: False.\n    \"\"\"\n\n    # TODO: Reduce grid_conf and data_aug_conf\n    def __init__(self, backbone_conf, head_conf, is_train_depth=False):\n        super(BaseBEVDepth, self).__init__()\n        self.backbone = BaseLSSFPN(**backbone_conf)\n        self.head = BEVDepthHead(**head_conf)\n        self.is_train_depth = is_train_depth\n\n    def forward(\n        self,\n        x,\n        mats_dict,\n        timestamps=None,\n    ):\n        \"\"\"Forward function for BEVDepth\n\n        Args:\n            x (Tensor): Input feature map.\n            mats_dict(dict):\n                sensor2ego_mats(Tensor): Transformation matrix from\n                    camera to ego with shape of (B, num_sweeps,\n                    num_cameras, 4, 4).\n                intrin_mats(Tensor): Intrinsic matrix with shape\n                    of (B, num_sweeps, num_cameras, 4, 4).\n                ida_mats(Tensor): Transformation matrix for ida with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                sensor2sensor_mats(Tensor): Transformation matrix\n                    from key frame camera to sweep frame camera with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                bda_mat(Tensor): Rotation matrix for bda with shape\n                    of (B, 4, 4).\n            timestamps (long): Timestamp.\n                Default: None.\n\n        Returns:\n            tuple(list[dict]): Output results for tasks.\n        \"\"\"\n        if self.is_train_depth and self.training:\n            x, depth_pred = self.backbone(x,\n                                          mats_dict,\n                                          timestamps,\n                                          is_return_depth=True)\n            preds = self.head(x)\n            return preds, depth_pred\n        else:\n            x = self.backbone(x, mats_dict, timestamps)\n            preds = self.head(x)\n            return preds\n\n    def get_targets(self, gt_boxes, gt_labels):\n        \"\"\"Generate training targets for a single sample.\n\n        Args:\n            gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n            gt_labels_3d (torch.Tensor): Labels of boxes.\n\n        Returns:\n            tuple[list[torch.Tensor]]: Tuple of target including \\\n                the following results in order.\n\n                - list[torch.Tensor]: Heatmap scores.\n                - list[torch.Tensor]: Ground truth boxes.\n                - list[torch.Tensor]: Indexes indicating the position \\\n                    of the valid boxes.\n                - list[torch.Tensor]: Masks indicating which boxes \\\n                    are valid.\n        \"\"\"\n        return self.head.get_targets(gt_boxes, gt_labels)\n\n    def loss(self, targets, preds_dicts):\n        \"\"\"Loss function for BEVDepth.\n\n        Args:\n            gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n                truth gt boxes.\n            gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n            
preds_dicts (dict): Output of forward function.\n\n        Returns:\n            dict[str:torch.Tensor]: Loss of heatmap and bbox of each task.\n        \"\"\"\n        return self.head.loss(targets, preds_dicts)\n\n    def get_bboxes(self, preds_dicts, img_metas=None, img=None, rescale=False):\n        \"\"\"Generate bboxes from bbox head predictions.\n\n        Args:\n            preds_dicts (tuple[list[dict]]): Prediction results.\n            img_metas (list[dict]): Point cloud and image's meta info.\n\n        Returns:\n            list[dict]: Decoded bbox, scores and labels after nms.\n        \"\"\"\n        return self.head.get_bboxes(preds_dicts, img_metas, img, rescale)\n"
  },
  {
    "path": "bevdepth/models/bev_stereo.py",
    "content": "from bevdepth.layers.backbones.bevstereo_lss_fpn import BEVStereoLSSFPN\nfrom bevdepth.models.base_bev_depth import BaseBEVDepth\n\n__all__ = ['BEVStereo']\n\n\nclass BEVStereo(BaseBEVDepth):\n    \"\"\"Source code of `BEVStereo`, `https://arxiv.org/abs/2209.10248`.\n\n    Args:\n        backbone_conf (dict): Config of backbone.\n        head_conf (dict): Config of head.\n        is_train_depth (bool): Whether to return depth.\n            Default: False.\n    \"\"\"\n\n    # TODO: Reduce grid_conf and data_aug_conf\n    def __init__(self, backbone_conf, head_conf, is_train_depth=False):\n        super(BEVStereo, self).__init__(backbone_conf, head_conf,\n                                        is_train_depth)\n        self.backbone = BEVStereoLSSFPN(**backbone_conf)\n"
  },
  {
    "path": "bevdepth/models/fusion_bev_depth.py",
    "content": "from bevdepth.layers.backbones.fusion_lss_fpn import FusionLSSFPN\nfrom bevdepth.layers.heads.bev_depth_head import BEVDepthHead\n\nfrom .base_bev_depth import BaseBEVDepth\n\n__all__ = ['FusionBEVDepth']\n\n\nclass FusionBEVDepth(BaseBEVDepth):\n    \"\"\"Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.\n\n    Args:\n        backbone_conf (dict): Config of backbone.\n        head_conf (dict): Config of head.\n        is_train_depth (bool): Whether to return depth.\n            Default: False.\n    \"\"\"\n\n    # TODO: Reduce grid_conf and data_aug_conf\n    def __init__(self, backbone_conf, head_conf, is_train_depth=False):\n        super(BaseBEVDepth, self).__init__()\n        self.backbone = FusionLSSFPN(**backbone_conf)\n        self.head = BEVDepthHead(**head_conf)\n        self.is_train_depth = is_train_depth\n\n    def forward(\n        self,\n        x,\n        mats_dict,\n        lidar_depth,\n        timestamps=None,\n    ):\n        \"\"\"Forward function for BEVDepth\n\n        Args:\n            x (Tensor): Input feature map.\n            mats_dict(dict):\n                sensor2ego_mats(Tensor): Transformation matrix from\n                    camera to ego with shape of (B, num_sweeps,\n                    num_cameras, 4, 4).\n                intrin_mats(Tensor): Intrinsic matrix with shape\n                    of (B, num_sweeps, num_cameras, 4, 4).\n                ida_mats(Tensor): Transformation matrix for ida with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                sensor2sensor_mats(Tensor): Transformation matrix\n                    from key frame camera to sweep frame camera with\n                    shape of (B, num_sweeps, num_cameras, 4, 4).\n                bda_mat(Tensor): Rotation matrix for bda with shape\n                    of (B, 4, 4).\n            lidar_depth (Tensor): Depth generated by lidar.\n            timestamps (long): Timestamp.\n                Default: None.\n\n        Returns:\n            tuple(list[dict]): Output results for tasks.\n        \"\"\"\n        if self.is_train_depth and self.training:\n            x = self.backbone(x, mats_dict, lidar_depth, timestamps)\n            preds = self.head(x)\n            return preds\n        else:\n            x = self.backbone(x, mats_dict, lidar_depth, timestamps)\n            preds = self.head(x)\n            return preds\n"
  },
  {
    "path": "bevdepth/models/matrixvt_det.py",
    "content": "from bevdepth.layers.backbones.matrixvt import MatrixVT\nfrom bevdepth.models.base_bev_depth import BaseBEVDepth\n\n\nclass MatrixVT_Det(BaseBEVDepth):\n    \"\"\"Implementation of MatrixVT for Object Detection.\n\n        Args:\n        backbone_conf (dict): Config of backbone.\n        head_conf (dict): Config of head.\n        is_train_depth (bool): Whether to return depth.\n            Default: False.\n    \"\"\"\n\n    def __init__(self, backbone_conf, head_conf, is_train_depth=False):\n        super().__init__(backbone_conf, head_conf, is_train_depth)\n        self.backbone = MatrixVT(**backbone_conf)\n"
  },
  {
    "path": "bevdepth/ops/voxel_pooling_inference/__init__.py",
    "content": "from .voxel_pooling_inference import voxel_pooling_inference\n\n__all__ = ['voxel_pooling_inference']\n"
  },
  {
    "path": "bevdepth/ops/voxel_pooling_inference/src/voxel_pooling_inference_forward.cpp",
    "content": "// Copyright (c) Megvii Inc. All rights reserved.\n#include <ATen/cuda/CUDAContext.h>\n#include <cuda.h>\n#include <cuda_fp16.h>\n#include <cuda_runtime_api.h>\n#include <torch/extension.h>\n#include <torch/serialize/tensor.h>\n\n#include <vector>\n\n#define CHECK_CUDA(x) \\\n  TORCH_CHECK(x.type().is_cuda(), #x, \" must be a CUDAtensor \")\n#define CHECK_CONTIGUOUS(x) \\\n  TORCH_CHECK(x.is_contiguous(), #x, \" must be contiguous \")\n#define CHECK_INPUT(x) \\\n  CHECK_CUDA(x);       \\\n  CHECK_CONTIGUOUS(x)\n\nint voxel_pooling_inference_forward_wrapper(\n    int batch_size, int num_cams, int num_depth, int num_height, int num_width,\n    int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z,\n    at::Tensor geom_xyz_tensor, at::Tensor depth_features_tensor,\n    at::Tensor context_features_tensor, at::Tensor output_features_tensor);\n\nvoid voxel_pooling_inference_forward_kernel_launcher(\n    int batch_size, int num_cams, int num_depth, int num_height, int num_width,\n    int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z,\n    const int *geom_xyz, const float *depth_features,\n    const float *context_features, float *output_features, cudaStream_t stream);\n\nvoid voxel_pooling_inference_forward_kernel_launcher(\n    int batch_size, int num_cams, int num_depth, int num_height, int num_width,\n    int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z,\n    const int *geom_xyz, const half *depth_features,\n    const half *context_features, half *output_features, cudaStream_t stream);\n\nint voxel_pooling_inference_forward_wrapper(\n    int batch_size, int num_cams, int num_depth, int num_height, int num_width,\n    int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z,\n    at::Tensor geom_xyz_tensor, at::Tensor depth_features_tensor,\n    at::Tensor context_features_tensor, at::Tensor output_features_tensor) {\n  CHECK_INPUT(geom_xyz_tensor);\n  CHECK_INPUT(depth_features_tensor);\n  CHECK_INPUT(context_features_tensor);\n  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();\n  const int *geom_xyz = geom_xyz_tensor.data_ptr<int>();\n  if (depth_features_tensor.dtype() == at::kFloat) {\n    const float *depth_features = depth_features_tensor.data_ptr<float>();\n    const float *context_features = context_features_tensor.data_ptr<float>();\n    float *output_features = output_features_tensor.data_ptr<float>();\n    voxel_pooling_inference_forward_kernel_launcher(\n        batch_size, num_cams, num_depth, num_height, num_width, num_channels,\n        num_voxel_x, num_voxel_y, num_voxel_z, geom_xyz, depth_features,\n        context_features, output_features, stream);\n  } else if (depth_features_tensor.dtype() == at::kHalf) {\n    assert(num_channels % 2 == 0);\n    const half *depth_features =\n        (half *)depth_features_tensor.data_ptr<at::Half>();\n    const half *context_features =\n        (half *)context_features_tensor.data_ptr<at::Half>();\n    half *output_features = (half *)output_features_tensor.data_ptr<at::Half>();\n    voxel_pooling_inference_forward_kernel_launcher(\n        batch_size, num_cams, num_depth, num_height, num_width, num_channels,\n        num_voxel_x, num_voxel_y, num_voxel_z, geom_xyz, depth_features,\n        context_features, output_features, stream);\n  }\n\n  return 1;\n}\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n  m.def(\"voxel_pooling_inference_forward_wrapper\",\n        &voxel_pooling_inference_forward_wrapper,\n        
\"voxel_pooling_inference_forward_wrapper\");\n}\n"
  },
  {
    "path": "bevdepth/ops/voxel_pooling_inference/src/voxel_pooling_inference_forward_cuda.cu",
    "content": "// Copyright (c) Megvii Inc. All rights reserved.\n#include <cuda_fp16.h>\n#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#define THREADS_BLOCK_X 32\n#define THREADS_BLOCK_Y 4\n#define THREADS_PER_BLOCK THREADS_BLOCK_X *THREADS_BLOCK_Y\n#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))\n\n__global__ void voxel_pooling_inference_forward_kernel(\n    int batch_size, int num_cams, int num_depth, int num_height, int num_width,\n    int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z,\n    const int *geom_xyz, const half *depth_features,\n    const half *context_features, half *output_features) {\n  const int bidx = blockIdx.x;\n  const int tidx = threadIdx.x;\n  const int tidy = threadIdx.y;\n  const int sample_dim = THREADS_PER_BLOCK;\n  const int idx_in_block = tidy * THREADS_BLOCK_X + tidx;\n  const int batch_size_with_cams = batch_size * num_cams;\n  const int block_sample_idx = bidx * sample_dim;\n  const int thread_sample_idx = block_sample_idx + idx_in_block;\n\n  const int total_samples =\n      batch_size_with_cams * num_depth * num_height * num_width;\n  __shared__ int geom_xyz_shared[THREADS_PER_BLOCK * 3];\n\n  if (thread_sample_idx < total_samples) {\n    const int sample_x = geom_xyz[thread_sample_idx * 3 + 0];\n    const int sample_y = geom_xyz[thread_sample_idx * 3 + 1];\n    const int sample_z = geom_xyz[thread_sample_idx * 3 + 2];\n    geom_xyz_shared[idx_in_block * 3 + 0] = sample_x;\n    geom_xyz_shared[idx_in_block * 3 + 1] = sample_y;\n    geom_xyz_shared[idx_in_block * 3 + 2] = sample_z;\n  }\n\n  __syncthreads();\n\n  for (int i = tidy;\n       i < THREADS_PER_BLOCK && block_sample_idx + i < total_samples;\n       i += THREADS_BLOCK_Y) {\n    const int sample_x = geom_xyz_shared[i * 3 + 0];\n    const int sample_y = geom_xyz_shared[i * 3 + 1];\n    const int sample_z = geom_xyz_shared[i * 3 + 2];\n    if (sample_x < 0 || sample_x >= num_voxel_x || sample_y < 0 ||\n        sample_y >= num_voxel_y || sample_z < 0 || sample_z >= num_voxel_z) {\n      continue;\n    }\n    const int sample_idx = block_sample_idx + i;\n    const int batch_idx =\n        sample_idx / (num_cams * num_depth * num_height * num_width);\n    const int width_idx = sample_idx % num_width;\n    const int height_idx = (sample_idx / num_width) % num_height;\n    const int cam_idx =\n        sample_idx / (num_depth * num_height * num_width) % num_cams;\n    // if(batch_idx > 0 || cam_idx > 0)\n    // printf(\"batch_idx: %d, width_idx: %d, height_idx: %d, cam_idx: %d \\n\",\n    // batch_idx, width_idx, height_idx, cam_idx);\n    const half depth_val = depth_features[sample_idx];\n    half res;\n    for (int j = tidx; j < num_channels; j += THREADS_BLOCK_X) {\n      const half context_val = context_features\n          [batch_idx * (num_cams * num_channels * num_height * num_width) +\n           cam_idx * (num_channels * num_height * num_width) +\n           j * (num_height * num_width) + height_idx * num_width + width_idx];\n      res = __hmul(depth_val, context_val);\n      atomicAdd(&output_features[(batch_idx * num_voxel_y * num_voxel_x +\n                                  sample_y * num_voxel_x + sample_x) *\n                                     num_channels +\n                                 j],\n                res);\n    }\n  }\n}\n\n__global__ void voxel_pooling_inference_forward_kernel(\n    int batch_size, int num_cams, int num_depth, int num_height, int num_width,\n    int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z,\n    
const int *geom_xyz, const float *depth_features,\n    const float *context_features, float *output_features) {\n  const int bidx = blockIdx.x;\n  const int tidx = threadIdx.x;\n  const int tidy = threadIdx.y;\n  const int sample_dim = THREADS_PER_BLOCK;\n  const int idx_in_block = tidy * THREADS_BLOCK_X + tidx;\n  const int batch_size_with_cams = batch_size * num_cams;\n  const int block_sample_idx = bidx * sample_dim;\n  const int thread_sample_idx = block_sample_idx + idx_in_block;\n\n  const int total_samples =\n      batch_size_with_cams * num_depth * num_height * num_width;\n  // printf(\"Total sample:%d num_cams: %d, num_depth: %d num_height: %d\n  // num_width: %d\\n\", total_samples, num_cams, num_depth, num_height,\n  // num_width);\n  __shared__ int geom_xyz_shared[THREADS_PER_BLOCK * 3];\n\n  if (thread_sample_idx < total_samples) {\n    const int sample_x = geom_xyz[thread_sample_idx * 3 + 0];\n    const int sample_y = geom_xyz[thread_sample_idx * 3 + 1];\n    const int sample_z = geom_xyz[thread_sample_idx * 3 + 2];\n    geom_xyz_shared[idx_in_block * 3 + 0] = sample_x;\n    geom_xyz_shared[idx_in_block * 3 + 1] = sample_y;\n    geom_xyz_shared[idx_in_block * 3 + 2] = sample_z;\n  }\n\n  __syncthreads();\n\n  for (int i = tidy;\n       i < THREADS_PER_BLOCK && block_sample_idx + i < total_samples;\n       i += THREADS_BLOCK_Y) {\n    const int sample_x = geom_xyz_shared[i * 3 + 0];\n    const int sample_y = geom_xyz_shared[i * 3 + 1];\n    const int sample_z = geom_xyz_shared[i * 3 + 2];\n    if (sample_x < 0 || sample_x >= num_voxel_x || sample_y < 0 ||\n        sample_y >= num_voxel_y || sample_z < 0 || sample_z >= num_voxel_z) {\n      continue;\n    }\n    const int sample_idx = block_sample_idx + i;\n    const int batch_idx =\n        sample_idx / (num_cams * num_depth * num_height * num_width);\n    const int width_idx = sample_idx % num_width;\n    const int height_idx = (sample_idx / num_width) % num_height;\n    const int cam_idx =\n        sample_idx / (num_depth * num_height * num_width) % num_cams;\n    const float depth_val = depth_features[sample_idx];\n    for (int j = tidx; j < num_channels; j += THREADS_BLOCK_X) {\n      const float context_val = context_features\n          [batch_idx * (num_cams * num_channels * num_height * num_width) +\n           cam_idx * (num_channels * num_height * num_width) +\n           j * (num_height * num_width) + height_idx * num_width + width_idx];\n      atomicAdd(&output_features[(batch_idx * num_voxel_y * num_voxel_x +\n                                  sample_y * num_voxel_x + sample_x) *\n                                     num_channels +\n                                 j],\n                depth_val * context_val);\n    }\n  }\n}\n\nvoid voxel_pooling_inference_forward_kernel_launcher(\n    int batch_size, int num_cams, int num_depth, int num_height, int num_width,\n    int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z,\n    const int *geom_xyz, const float *depth_features,\n    const float *context_features, float *output_features,\n    cudaStream_t stream) {\n  cudaError_t err;\n  dim3 blocks(DIVUP(batch_size * num_cams * num_depth * num_height * num_width,\n                    THREADS_PER_BLOCK));\n  dim3 threads(THREADS_BLOCK_X, THREADS_BLOCK_Y);\n\n  voxel_pooling_inference_forward_kernel<<<blocks, threads, 0, stream>>>(\n      batch_size, num_cams, num_depth, num_height, num_width, num_channels,\n      num_voxel_x, num_voxel_y, num_voxel_z, geom_xyz, depth_features,\n      context_features, 
output_features);\n  err = cudaGetLastError();\n  if (cudaSuccess != err) {\n    fprintf(stderr, \"CUDA kernel failed : %s\\n\", cudaGetErrorString(err));\n    exit(-1);\n  }\n}\n\nvoid voxel_pooling_inference_forward_kernel_launcher(\n    int batch_size, int num_cams, int num_depth, int num_height, int num_width,\n    int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z,\n    const int *geom_xyz, const half *depth_features,\n    const half *context_features, half *output_features, cudaStream_t stream) {\n  cudaError_t err;\n  dim3 blocks(DIVUP(batch_size * num_cams * num_depth * num_height * num_width,\n                    THREADS_PER_BLOCK));\n  dim3 threads(THREADS_BLOCK_X, THREADS_BLOCK_Y);\n\n  voxel_pooling_inference_forward_kernel<<<blocks, threads, 0, stream>>>(\n      batch_size, num_cams, num_depth, num_height, num_width, num_channels,\n      num_voxel_x, num_voxel_y, num_voxel_z, geom_xyz, depth_features,\n      context_features, output_features);\n  err = cudaGetLastError();\n  if (cudaSuccess != err) {\n    fprintf(stderr, \"CUDA kernel failed : %s\\n\", cudaGetErrorString(err));\n    exit(-1);\n  }\n}\n"
  },
  {
    "path": "bevdepth/ops/voxel_pooling_inference/voxel_pooling_inference.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport torch\nfrom torch.autograd import Function\n\nfrom . import voxel_pooling_inference_ext\n\n\nclass VoxelPoolingInference(Function):\n\n    @staticmethod\n    def forward(ctx, geom_xyz: torch.Tensor, depth_features: torch.Tensor,\n                context_features: torch.Tensor,\n                voxel_num: torch.Tensor) -> torch.Tensor:\n        \"\"\"Forward function for `voxel pooling.\n\n        Args:\n            geom_xyz (Tensor): xyz coord for each voxel with the shape\n                of [B, N, 3].\n            input_features (Tensor): feature for each voxel with the\n                shape of [B, N, C].\n            voxel_num (Tensor): Number of voxels for each dim with the\n                shape of [3].\n\n        Returns:\n            Tensor: (B, C, H, W) bev feature map.\n        \"\"\"\n        assert geom_xyz.is_contiguous()\n        assert depth_features.is_contiguous()\n        assert context_features.is_contiguous()\n        # no gradient for input_features and geom_feats\n        ctx.mark_non_differentiable(geom_xyz)\n        batch_size = geom_xyz.shape[0]\n        num_cams = geom_xyz.shape[1]\n        num_depth = geom_xyz.shape[2]\n        num_height = geom_xyz.shape[3]\n        num_width = geom_xyz.shape[4]\n        num_channels = context_features.shape[1]\n        output_features = depth_features.new_zeros(\n            (batch_size, voxel_num[1], voxel_num[0], num_channels))\n        voxel_pooling_inference_ext.voxel_pooling_inference_forward_wrapper(\n            batch_size,\n            num_cams,\n            num_depth,\n            num_height,\n            num_width,\n            num_channels,\n            voxel_num[0],\n            voxel_num[1],\n            voxel_num[2],\n            geom_xyz,\n            depth_features,\n            context_features,\n            output_features,\n        )\n        return output_features.permute(0, 3, 1, 2)\n\n\nvoxel_pooling_inference = VoxelPoolingInference.apply\n"
  },
  {
    "path": "bevdepth/ops/voxel_pooling_train/__init__.py",
    "content": "from .voxel_pooling_train import voxel_pooling_train\n\n__all__ = ['voxel_pooling_train']\n"
  },
  {
    "path": "bevdepth/ops/voxel_pooling_train/src/voxel_pooling_train_forward.cpp",
    "content": "// Copyright (c) Megvii Inc. All rights reserved.\n#include <ATen/cuda/CUDAContext.h>\n#include <cuda.h>\n#include <cuda_fp16.h>\n#include <cuda_runtime_api.h>\n#include <torch/extension.h>\n#include <torch/serialize/tensor.h>\n\n#include <vector>\n#define CHECK_CUDA(x) \\\n  TORCH_CHECK(x.type().is_cuda(), #x, \" must be a CUDAtensor \")\n#define CHECK_CONTIGUOUS(x) \\\n  TORCH_CHECK(x.is_contiguous(), #x, \" must be contiguous \")\n#define CHECK_INPUT(x) \\\n  CHECK_CUDA(x);       \\\n  CHECK_CONTIGUOUS(x)\n\nint voxel_pooling_train_forward_wrapper(int batch_size, int num_points,\n                                        int num_channels, int num_voxel_x,\n                                        int num_voxel_y, int num_voxel_z,\n                                        at::Tensor geom_xyz_tensor,\n                                        at::Tensor input_features_tensor,\n                                        at::Tensor output_features_tensor,\n                                        at::Tensor pos_memo_tensor);\n\nvoid voxel_pooling_train_forward_kernel_launcher(\n    int batch_size, int num_points, int num_channels, int num_voxel_x,\n    int num_voxel_y, int num_voxel_z, const int *geom_xyz,\n    const float *input_features, float *output_features, int *pos_memo,\n    cudaStream_t stream);\n\nvoid voxel_pooling_train_forward_kernel_launcher(\n    int batch_size, int num_points, int num_channels, int num_voxel_x,\n    int num_voxel_y, int num_voxel_z, const int *geom_xyz,\n    const half *input_features, half *output_features, int *pos_memo,\n    cudaStream_t stream);\n\nint voxel_pooling_train_forward_wrapper(int batch_size, int num_points,\n                                        int num_channels, int num_voxel_x,\n                                        int num_voxel_y, int num_voxel_z,\n                                        at::Tensor geom_xyz_tensor,\n                                        at::Tensor input_features_tensor,\n                                        at::Tensor output_features_tensor,\n                                        at::Tensor pos_memo_tensor) {\n  CHECK_INPUT(geom_xyz_tensor);\n  CHECK_INPUT(input_features_tensor);\n  const int *geom_xyz = geom_xyz_tensor.data_ptr<int>();\n  int *pos_memo = pos_memo_tensor.data_ptr<int>();\n\n  cudaStream_t stream = at::cuda::getCurrentCUDAStream().stream();\n  if (input_features_tensor.dtype() == at::kFloat) {\n    const float *input_features = input_features_tensor.data_ptr<float>();\n    float *output_features = output_features_tensor.data_ptr<float>();\n    voxel_pooling_train_forward_kernel_launcher(\n        batch_size, num_points, num_channels, num_voxel_x, num_voxel_y,\n        num_voxel_z, geom_xyz, input_features, output_features, pos_memo,\n        stream);\n  }\n\n  else if (input_features_tensor.dtype() == at::kHalf) {\n    assert(num_channels % 2 == 0);\n    const half *input_features =\n        (half *)(input_features_tensor.data_ptr<at::Half>());\n    half *output_features =\n        (half *)(output_features_tensor.data_ptr<at::Half>());\n    voxel_pooling_train_forward_kernel_launcher(\n        batch_size, num_points, num_channels, num_voxel_x, num_voxel_y,\n        num_voxel_z, geom_xyz, input_features, output_features, pos_memo,\n        stream);\n  }\n\n  return 1;\n}\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n  m.def(\"voxel_pooling_train_forward_wrapper\",\n        &voxel_pooling_train_forward_wrapper,\n        \"voxel_pooling_train_forward_wrapper\");\n}\n"
  },
  {
    "path": "bevdepth/ops/voxel_pooling_train/src/voxel_pooling_train_forward_cuda.cu",
    "content": "// Copyright (c) Megvii Inc. All rights reserved.\n#include <cuda_fp16.h>\n#include <math.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#define THREADS_BLOCK_X 32\n#define THREADS_BLOCK_Y 4\n#define THREADS_PER_BLOCK THREADS_BLOCK_X *THREADS_BLOCK_Y\n#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))\n\ntemplate <typename T>\n__global__ void voxel_pooling_train_forward_kernel(\n    int batch_size, int num_points, int num_channels, int num_voxel_x,\n    int num_voxel_y, int num_voxel_z, const int *geom_xyz,\n    const T *input_features, T *output_features, int *pos_memo) {\n  const int bidx = blockIdx.x;\n  const int tidx = threadIdx.x;\n  const int tidy = threadIdx.y;\n  const int sample_dim = THREADS_PER_BLOCK;\n  const int idx_in_block = tidy * THREADS_BLOCK_X + tidx;\n\n  const int block_sample_idx = bidx * sample_dim;\n  const int thread_sample_idx = block_sample_idx + idx_in_block;\n\n  const int total_samples = batch_size * num_points;\n\n  __shared__ int geom_xyz_shared[THREADS_PER_BLOCK * 3];\n\n  if (thread_sample_idx < total_samples) {\n    const int sample_x = geom_xyz[thread_sample_idx * 3 + 0];\n    const int sample_y = geom_xyz[thread_sample_idx * 3 + 1];\n    const int sample_z = geom_xyz[thread_sample_idx * 3 + 2];\n    geom_xyz_shared[idx_in_block * 3 + 0] = sample_x;\n    geom_xyz_shared[idx_in_block * 3 + 1] = sample_y;\n    geom_xyz_shared[idx_in_block * 3 + 2] = sample_z;\n    if ((sample_x >= 0 && sample_x < num_voxel_x) &&\n        (sample_y >= 0 && sample_y < num_voxel_y) &&\n        (sample_z >= 0 && sample_z < num_voxel_z)) {\n      pos_memo[thread_sample_idx * 3 + 0] = thread_sample_idx / num_points;\n      pos_memo[thread_sample_idx * 3 + 1] = sample_y;\n      pos_memo[thread_sample_idx * 3 + 2] = sample_x;\n    }\n  }\n\n  __syncthreads();\n\n  for (int i = tidy;\n       i < THREADS_PER_BLOCK && block_sample_idx + i < total_samples;\n       i += THREADS_BLOCK_Y) {\n    const int sample_x = geom_xyz_shared[i * 3 + 0];\n    const int sample_y = geom_xyz_shared[i * 3 + 1];\n    const int sample_z = geom_xyz_shared[i * 3 + 2];\n    if (sample_x < 0 || sample_x >= num_voxel_x || sample_y < 0 ||\n        sample_y >= num_voxel_y || sample_z < 0 || sample_z >= num_voxel_z) {\n      continue;\n    }\n    const int batch_idx = (block_sample_idx + i) / num_points;\n    for (int j = tidx; j < num_channels; j += THREADS_BLOCK_X) {\n      atomicAdd(&output_features[(batch_idx * num_voxel_y * num_voxel_x +\n                                  sample_y * num_voxel_x + sample_x) *\n                                     num_channels +\n                                 j],\n                input_features[(block_sample_idx + i) * num_channels + j]);\n    }\n  }\n}\n\nvoid voxel_pooling_train_forward_kernel_launcher(\n    int batch_size, int num_points, int num_channels, int num_voxel_x,\n    int num_voxel_y, int num_voxel_z, const int *geom_xyz,\n    const float *input_features, float *output_features, int *pos_memo,\n    cudaStream_t stream) {\n  cudaError_t err;\n\n  dim3 blocks(DIVUP(batch_size * num_points, THREADS_PER_BLOCK));\n  dim3 threads(THREADS_BLOCK_X, THREADS_BLOCK_Y);\n\n  voxel_pooling_train_forward_kernel<<<blocks, threads, 0, stream>>>(\n      batch_size, num_points, num_channels, num_voxel_x, num_voxel_y,\n      num_voxel_z, geom_xyz, input_features, output_features, pos_memo);\n  err = cudaGetLastError();\n  if (cudaSuccess != err) {\n    fprintf(stderr, \"CUDA kernel failed : %s\\n\", cudaGetErrorString(err));\n    exit(-1);\n  }\n}\n\nvoid 
voxel_pooling_train_forward_kernel_launcher(\n    int batch_size, int num_points, int num_channels, int num_voxel_x,\n    int num_voxel_y, int num_voxel_z, const int *geom_xyz,\n    const half *input_features, half *output_features, int *pos_memo,\n    cudaStream_t stream) {\n  cudaError_t err;\n\n  dim3 blocks(DIVUP(batch_size * num_points, THREADS_PER_BLOCK));\n  dim3 threads(THREADS_BLOCK_X, THREADS_BLOCK_Y);\n\n  voxel_pooling_train_forward_kernel<<<blocks, threads, 0, stream>>>(\n      batch_size, num_points, num_channels, num_voxel_x, num_voxel_y,\n      num_voxel_z, geom_xyz, input_features, output_features, pos_memo);\n  err = cudaGetLastError();\n  if (cudaSuccess != err) {\n    fprintf(stderr, \"CUDA kernel failed : %s\\n\", cudaGetErrorString(err));\n    exit(-1);\n  }\n}\n"
  },
  {
    "path": "bevdepth/ops/voxel_pooling_train/voxel_pooling_train.py",
    "content": "# Copyright (c) Megvii Inc. All rights reserved.\nimport torch\nfrom torch.autograd import Function\n\nfrom . import voxel_pooling_train_ext\n\n\nclass VoxelPoolingTrain(Function):\n\n    @staticmethod\n    def forward(ctx, geom_xyz: torch.Tensor, input_features: torch.Tensor,\n                voxel_num: torch.Tensor) -> torch.Tensor:\n        \"\"\"Forward function for `voxel pooling.\n\n        Args:\n            geom_xyz (Tensor): xyz coord for each voxel with the shape\n                of [B, N, 3].\n            input_features (Tensor): feature for each voxel with the\n                shape of [B, N, C].\n            voxel_num (Tensor): Number of voxels for each dim with the\n                shape of [3].\n\n        Returns:\n            Tensor: (B, C, H, W) bev feature map.\n        \"\"\"\n        assert geom_xyz.is_contiguous()\n        assert input_features.is_contiguous()\n        # no gradient for input_features and geom_feats\n        ctx.mark_non_differentiable(geom_xyz)\n        grad_input_features = torch.zeros_like(input_features)\n        geom_xyz = geom_xyz.reshape(geom_xyz.shape[0], -1, geom_xyz.shape[-1])\n        input_features = input_features.reshape(\n            (geom_xyz.shape[0], -1, input_features.shape[-1]))\n        assert geom_xyz.shape[1] == input_features.shape[1]\n        batch_size = input_features.shape[0]\n        num_points = input_features.shape[1]\n        num_channels = input_features.shape[2]\n        output_features = input_features.new_zeros(batch_size, voxel_num[1],\n                                                   voxel_num[0], num_channels)\n        # Save the position of bev_feature_map for each input point.\n        pos_memo = geom_xyz.new_ones(batch_size, num_points, 3) * -1\n        voxel_pooling_train_ext.voxel_pooling_train_forward_wrapper(\n            batch_size,\n            num_points,\n            num_channels,\n            voxel_num[0],\n            voxel_num[1],\n            voxel_num[2],\n            geom_xyz,\n            input_features,\n            output_features,\n            pos_memo,\n        )\n        # save grad_input_features and pos_memo for backward\n        ctx.save_for_backward(grad_input_features, pos_memo)\n        return output_features.permute(0, 3, 1, 2)\n\n    @staticmethod\n    def backward(ctx, grad_output_features):\n        (grad_input_features, pos_memo) = ctx.saved_tensors\n        kept = (pos_memo != -1)[..., 0]\n        grad_input_features_shape = grad_input_features.shape\n        grad_input_features = grad_input_features.reshape(\n            grad_input_features.shape[0], -1, grad_input_features.shape[-1])\n        grad_input_features[kept] = grad_output_features[\n            pos_memo[kept][..., 0].long(), :, pos_memo[kept][..., 1].long(),\n            pos_memo[kept][..., 2].long()]\n        grad_input_features = grad_input_features.reshape(\n            grad_input_features_shape)\n        return None, grad_input_features, None\n\n\nvoxel_pooling_train = VoxelPoolingTrain.apply\n"
  },
  {
    "path": "bevdepth/utils/torch_dist.py",
    "content": "\"\"\"\n@author: zeming li\n@contact: zengarden2009@gmail.com\n\"\"\"\nfrom torch import distributed as dist\n\n\ndef get_rank() -> int:\n    if not dist.is_available():\n        return 0\n    if not dist.is_initialized():\n        return 0\n    return dist.get_rank()\n\n\ndef get_world_size() -> int:\n    if not dist.is_available():\n        return 1\n    if not dist.is_initialized():\n        return 1\n    return dist.get_world_size()\n\n\ndef synchronize():\n    \"\"\"Helper function to synchronize (barrier)\n        among all processes when using distributed training\"\"\"\n    if not dist.is_available():\n        return\n    if not dist.is_initialized():\n        return\n    current_world_size = dist.get_world_size()\n    if current_world_size == 1:\n        return\n    dist.barrier()\n\n\ndef all_gather_object(obj):\n    world_size = get_world_size()\n    if world_size < 2:\n        return [obj]\n    output = [None for _ in range(world_size)]\n    dist.all_gather_object(output, obj)\n    return output\n\n\ndef is_available() -> bool:\n    return dist.is_available()\n"
  },
  {
    "path": "requirements-dev.txt",
    "content": "# code formatter\n# force to use same version of the formatter, can be changed only by maintainer.\n\nanybadge\nautoflake==1.4\nblack==20.8b1\nflake8\ngitlint\nisort==4.3.21\nnbsphinx\npre-commit\npylint==2.3.1\npytest\npytest-cov\nradon==4.2.0\nrecommonmark\nseed-isort-config\nsetuptools\n\n# -----  document usage\nsphinx==3.5.4\nsphinx-material\nsphinx_markdown_tables\n"
  },
  {
    "path": "requirements.txt",
    "content": "numba\nnumpy\nnuscenes-devkit\nopencv-python-headless\npandas\npytorch-lightning==1.6.0\nscikit-image\nscipy\nsetuptools==59.5.0\ntensorboardX\ntorch==1.9.0\ntorchvision==0.10.0\n"
  },
  {
    "path": "scripts/gen_info.py",
    "content": "import mmcv\nimport numpy as np\nfrom nuscenes.nuscenes import NuScenes\nfrom nuscenes.utils import splits\nfrom tqdm import tqdm\n\n\ndef generate_info(nusc, scenes, max_cam_sweeps=6, max_lidar_sweeps=10):\n    infos = list()\n    for cur_scene in tqdm(nusc.scene):\n        if cur_scene['name'] not in scenes:\n            continue\n        first_sample_token = cur_scene['first_sample_token']\n        cur_sample = nusc.get('sample', first_sample_token)\n        while True:\n            info = dict()\n            sweep_cam_info = dict()\n            cam_datas = list()\n            lidar_datas = list()\n            info['sample_token'] = cur_sample['token']\n            info['timestamp'] = cur_sample['timestamp']\n            info['scene_token'] = cur_sample['scene_token']\n            cam_names = [\n                'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_RIGHT', 'CAM_BACK',\n                'CAM_BACK_LEFT', 'CAM_FRONT_LEFT'\n            ]\n            lidar_names = ['LIDAR_TOP']\n            cam_infos = dict()\n            lidar_infos = dict()\n            for cam_name in cam_names:\n                cam_data = nusc.get('sample_data',\n                                    cur_sample['data'][cam_name])\n                cam_datas.append(cam_data)\n                sweep_cam_info = dict()\n                sweep_cam_info['sample_token'] = cam_data['sample_token']\n                sweep_cam_info['ego_pose'] = nusc.get(\n                    'ego_pose', cam_data['ego_pose_token'])\n                sweep_cam_info['timestamp'] = cam_data['timestamp']\n                sweep_cam_info['is_key_frame'] = cam_data['is_key_frame']\n                sweep_cam_info['height'] = cam_data['height']\n                sweep_cam_info['width'] = cam_data['width']\n                sweep_cam_info['filename'] = cam_data['filename']\n                sweep_cam_info['calibrated_sensor'] = nusc.get(\n                    'calibrated_sensor', cam_data['calibrated_sensor_token'])\n                cam_infos[cam_name] = sweep_cam_info\n            for lidar_name in lidar_names:\n                lidar_data = nusc.get('sample_data',\n                                      cur_sample['data'][lidar_name])\n                lidar_datas.append(lidar_data)\n                sweep_lidar_info = dict()\n                sweep_lidar_info['sample_token'] = lidar_data['sample_token']\n                sweep_lidar_info['ego_pose'] = nusc.get(\n                    'ego_pose', lidar_data['ego_pose_token'])\n                sweep_lidar_info['timestamp'] = lidar_data['timestamp']\n                sweep_lidar_info['filename'] = lidar_data['filename']\n                sweep_lidar_info['calibrated_sensor'] = nusc.get(\n                    'calibrated_sensor', lidar_data['calibrated_sensor_token'])\n                lidar_infos[lidar_name] = sweep_lidar_info\n\n            lidar_sweeps = [dict() for _ in range(max_lidar_sweeps)]\n            cam_sweeps = [dict() for _ in range(max_cam_sweeps)]\n            info['cam_infos'] = cam_infos\n            info['lidar_infos'] = lidar_infos\n            # for i in range(max_cam_sweeps):\n            #     cam_sweeps.append(dict())\n            for k, cam_data in enumerate(cam_datas):\n                sweep_cam_data = cam_data\n                for j in range(max_cam_sweeps):\n                    if sweep_cam_data['prev'] == '':\n                        break\n                    else:\n                        sweep_cam_data = nusc.get('sample_data',\n                                                  
sweep_cam_data['prev'])\n                        sweep_cam_info = dict()\n                        sweep_cam_info['sample_token'] = sweep_cam_data[\n                            'sample_token']\n                        if sweep_cam_info['sample_token'] != cam_data[\n                                'sample_token']:\n                            break\n                        sweep_cam_info['ego_pose'] = nusc.get(\n                            'ego_pose', cam_data['ego_pose_token'])\n                        sweep_cam_info['timestamp'] = sweep_cam_data[\n                            'timestamp']\n                        sweep_cam_info['is_key_frame'] = sweep_cam_data[\n                            'is_key_frame']\n                        sweep_cam_info['height'] = sweep_cam_data['height']\n                        sweep_cam_info['width'] = sweep_cam_data['width']\n                        sweep_cam_info['filename'] = sweep_cam_data['filename']\n                        sweep_cam_info['calibrated_sensor'] = nusc.get(\n                            'calibrated_sensor',\n                            cam_data['calibrated_sensor_token'])\n                        cam_sweeps[j][cam_names[k]] = sweep_cam_info\n\n            for k, lidar_data in enumerate(lidar_datas):\n                sweep_lidar_data = lidar_data\n                for j in range(max_lidar_sweeps):\n                    if sweep_lidar_data['prev'] == '':\n                        break\n                    else:\n                        sweep_lidar_data = nusc.get('sample_data',\n                                                    sweep_lidar_data['prev'])\n                        sweep_lidar_info = dict()\n                        sweep_lidar_info['sample_token'] = sweep_lidar_data[\n                            'sample_token']\n                        if sweep_lidar_info['sample_token'] != lidar_data[\n                                'sample_token']:\n                            break\n                        sweep_lidar_info['ego_pose'] = nusc.get(\n                            'ego_pose', sweep_lidar_data['ego_pose_token'])\n                        sweep_lidar_info['timestamp'] = sweep_lidar_data[\n                            'timestamp']\n                        sweep_lidar_info['is_key_frame'] = sweep_lidar_data[\n                            'is_key_frame']\n                        sweep_lidar_info['filename'] = sweep_lidar_data[\n                            'filename']\n                        sweep_lidar_info['calibrated_sensor'] = nusc.get(\n                            'calibrated_sensor',\n                            cam_data['calibrated_sensor_token'])\n                        lidar_sweeps[j][lidar_names[k]] = sweep_lidar_info\n            # Remove empty sweeps.\n            for i, sweep in enumerate(cam_sweeps):\n                if len(sweep.keys()) == 0:\n                    cam_sweeps = cam_sweeps[:i]\n                    break\n            for i, sweep in enumerate(lidar_sweeps):\n                if len(sweep.keys()) == 0:\n                    lidar_sweeps = lidar_sweeps[:i]\n                    break\n            info['cam_sweeps'] = cam_sweeps\n            info['lidar_sweeps'] = lidar_sweeps\n            ann_infos = list()\n            if 'anns' in cur_sample:\n                for ann in cur_sample['anns']:\n                    ann_info = nusc.get('sample_annotation', ann)\n                    velocity = nusc.box_velocity(ann_info['token'])\n                    if np.any(np.isnan(velocity)):\n                        velocity = 
np.zeros(3)\n                    ann_info['velocity'] = velocity\n                    ann_infos.append(ann_info)\n                info['ann_infos'] = ann_infos\n            infos.append(info)\n            if cur_sample['next'] == '':\n                break\n            else:\n                cur_sample = nusc.get('sample', cur_sample['next'])\n    return infos\n\n\ndef main():\n    trainval_nusc = NuScenes(version='v1.0-trainval',\n                             dataroot='./data/nuScenes/',\n                             verbose=True)\n    train_scenes = splits.train\n    val_scenes = splits.val\n    train_infos = generate_info(trainval_nusc, train_scenes)\n    val_infos = generate_info(trainval_nusc, val_scenes)\n    mmcv.dump(train_infos, './data/nuScenes/nuscenes_infos_train.pkl')\n    mmcv.dump(val_infos, './data/nuScenes/nuscenes_infos_val.pkl')\n    test_nusc = NuScenes(version='v1.0-test',\n                         dataroot='./data/nuScenes/',\n                         verbose=True)\n    test_scenes = splits.test\n    test_infos = generate_info(test_nusc, test_scenes)\n    mmcv.dump(test_infos, './data/nuScenes/nuscenes_infos_test.pkl')\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "scripts/visualize_nusc.py",
    "content": "import os\nfrom argparse import ArgumentParser\n\nimport cv2\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport mmcv\nimport numpy as np\nfrom nuscenes.utils.data_classes import Box, LidarPointCloud\nfrom pyquaternion import Quaternion\n\nfrom bevdepth.datasets.nusc_det_dataset import \\\n    map_name_from_general_to_detection\n\n\ndef parse_args():\n    parser = ArgumentParser(add_help=False)\n    parser.add_argument('idx',\n                        type=int,\n                        help='Index of the dataset to be visualized.')\n    parser.add_argument('result_path', help='Path of the result json file.')\n    parser.add_argument('target_path',\n                        help='Target path to save the visualization result.')\n\n    args = parser.parse_args()\n    return args\n\n\ndef get_ego_box(box_dict, ego2global_rotation, ego2global_translation):\n    box = Box(\n        box_dict['translation'],\n        box_dict['size'],\n        Quaternion(box_dict['rotation']),\n    )\n    trans = -np.array(ego2global_translation)\n    rot = Quaternion(ego2global_rotation).inverse\n    box.translate(trans)\n    box.rotate(rot)\n    box_xyz = np.array(box.center)\n    box_dxdydz = np.array(box.wlh)[[1, 0, 2]]\n    box_yaw = np.array([box.orientation.yaw_pitch_roll[0]])\n    box_velo = np.array(box.velocity[:2])\n    return np.concatenate([box_xyz, box_dxdydz, box_yaw, box_velo])\n\n\ndef rotate_points_along_z(points, angle):\n    \"\"\"\n    Args:\n        points: (B, N, 3 + C)\n        angle: (B), angle along z-axis, angle increases x ==> y\n    Returns:\n    \"\"\"\n    cosa = np.cos(angle)\n    sina = np.sin(angle)\n    zeros = np.zeros(points.shape[0])\n    ones = np.ones(points.shape[0])\n    rot_matrix = np.stack(\n        (cosa, sina, zeros, -sina, cosa, zeros, zeros, zeros, ones),\n        axis=1).reshape(-1, 3, 3)\n    points_rot = np.matmul(points[:, :, 0:3], rot_matrix)\n    points_rot = np.concatenate((points_rot, points[:, :, 3:]), axis=-1)\n    return points_rot\n\n\ndef get_corners(boxes3d):\n    \"\"\"\n        7 -------- 4\n       /|         /|\n      6 -------- 5 .\n      | |        | |\n      . 
3 -------- 0\n      |/         |/\n      2 -------- 1\n    Args:\n        boxes3d:  (N, 7) [x, y, z, dx, dy, dz, heading],\n            (x, y, z) is the box center\n    Returns:\n    \"\"\"\n    template = (np.array((\n        [1, 1, -1],\n        [1, -1, -1],\n        [-1, -1, -1],\n        [-1, 1, -1],\n        [1, 1, 1],\n        [1, -1, 1],\n        [-1, -1, 1],\n        [-1, 1, 1],\n    )) / 2)\n\n    corners3d = np.tile(boxes3d[:, None, 3:6],\n                        [1, 8, 1]) * template[None, :, :]\n    corners3d = rotate_points_along_z(corners3d.reshape(-1, 8, 3),\n                                      boxes3d[:, 6]).reshape(-1, 8, 3)\n    corners3d += boxes3d[:, None, 0:3]\n\n    return corners3d\n\n\ndef get_bev_lines(corners):\n    return [[[corners[i, 0], corners[(i + 1) % 4, 0]],\n             [corners[i, 1], corners[(i + 1) % 4, 1]]] for i in range(4)]\n\n\ndef get_3d_lines(corners):\n    ret = []\n    for st, ed in [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7],\n                   [7, 4], [0, 4], [1, 5], [2, 6], [3, 7]]:\n        if corners[st, -1] > 0 and corners[ed, -1] > 0:\n            ret.append([[corners[st, 0], corners[ed, 0]],\n                        [corners[st, 1], corners[ed, 1]]])\n    return ret\n\n\ndef get_cam_corners(corners, translation, rotation, cam_intrinsics):\n    cam_corners = corners.copy()\n    cam_corners -= np.array(translation)\n    cam_corners = cam_corners @ Quaternion(rotation).inverse.rotation_matrix.T\n    cam_corners = cam_corners @ np.array(cam_intrinsics).T\n    valid = cam_corners[:, -1] > 0\n    cam_corners /= cam_corners[:, 2:3]\n    cam_corners[~valid] = 0\n    return cam_corners\n\n\ndef demo(\n    idx,\n    nusc_results_file,\n    dump_file,\n    threshold=0.0,\n    show_range=60,\n    show_classes=[\n        'car',\n        'truck',\n        'construction_vehicle',\n        'bus',\n        'trailer',\n        'barrier',\n        'motorcycle',\n        'bicycle',\n        'pedestrian',\n        'traffic_cone',\n    ],\n):\n    # Set cameras\n    IMG_KEYS = [\n        'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_RIGHT',\n        'CAM_BACK', 'CAM_BACK_LEFT'\n    ]\n    infos = mmcv.load('data/nuScenes/nuscenes_12hz_infos_val.pkl')\n    assert idx < len(infos)\n    # Get data from dataset\n    results = mmcv.load(nusc_results_file)['results']\n    info = infos[idx]\n    lidar_path = info['lidar_infos']['LIDAR_TOP']['filename']\n    lidar_points = np.fromfile(os.path.join('data/nuScenes', lidar_path),\n                               dtype=np.float32,\n                               count=-1).reshape(-1, 5)[..., :4]\n    lidar_calibrated_sensor = info['lidar_infos']['LIDAR_TOP'][\n        'calibrated_sensor']\n    # Get point cloud\n    pts = lidar_points.copy()\n    ego2global_rotation = np.mean(\n        [info['cam_infos'][cam]['ego_pose']['rotation'] for cam in IMG_KEYS],\n        0)\n    ego2global_translation = np.mean([\n        info['cam_infos'][cam]['ego_pose']['translation'] for cam in IMG_KEYS\n    ], 0)\n    lidar_points = LidarPointCloud(lidar_points.T)\n    lidar_points.rotate(\n        Quaternion(lidar_calibrated_sensor['rotation']).rotation_matrix)\n    lidar_points.translate(np.array(lidar_calibrated_sensor['translation']))\n    pts = lidar_points.points.T\n\n    # Get GT corners\n    gt_corners = []\n    for i in range(len(info['ann_infos'])):\n        if map_name_from_general_to_detection[\n                info['ann_infos'][i]['category_name']] in show_classes:\n            box = get_ego_box(\n 
               dict(\n                    size=info['ann_infos'][i]['size'],\n                    rotation=info['ann_infos'][i]['rotation'],\n                    translation=info['ann_infos'][i]['translation'],\n                ), ego2global_rotation, ego2global_translation)\n            if np.linalg.norm(box[:2]) <= show_range:\n                corners = get_corners(box[None])[0]\n                gt_corners.append(corners)\n\n    # Get prediction corners\n    pred_corners, pred_class = [], []\n    for box in results[info['sample_token']]:\n        if box['detection_score'] >= threshold and box[\n                'detection_name'] in show_classes:\n            box3d = get_ego_box(box, ego2global_rotation,\n                                ego2global_translation)\n            box3d[2] += 0.5 * box3d[5]  # NOTE\n            if np.linalg.norm(box3d[:2]) <= show_range:\n                corners = get_corners(box3d[None])[0]\n                pred_corners.append(corners)\n                pred_class.append(box['detection_name'])\n\n    # Set figure size\n    plt.figure(figsize=(24, 8))\n\n    for i, k in enumerate(IMG_KEYS):\n        # Draw camera views\n        fig_idx = i + 1 if i < 3 else i + 2\n        plt.subplot(2, 4, fig_idx)\n\n        # Set camera attributes\n        plt.title(k)\n        plt.axis('off')\n        plt.xlim(0, 1600)\n        plt.ylim(900, 0)\n\n        img = mmcv.imread(\n            os.path.join('data/nuScenes', info['cam_infos'][k]['filename']))\n        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n        # Draw images\n        plt.imshow(img)\n\n        # Draw 3D predictions\n        for corners, cls in zip(pred_corners, pred_class):\n            cam_corners = get_cam_corners(\n                corners,\n                info['cam_infos'][k]['calibrated_sensor']['translation'],\n                info['cam_infos'][k]['calibrated_sensor']['rotation'],\n                info['cam_infos'][k]['calibrated_sensor']['camera_intrinsic'])\n            lines = get_3d_lines(cam_corners)\n            for line in lines:\n                plt.plot(line[0],\n                         line[1],\n                         c=cm.get_cmap('tab10')(show_classes.index(cls)))\n\n    # Draw BEV\n    plt.subplot(1, 4, 4)\n\n    # Set BEV attributes\n    plt.title('LIDAR_TOP')\n    plt.axis('equal')\n    plt.xlim(-40, 40)\n    plt.ylim(-40, 40)\n\n    # Draw point cloud\n    plt.scatter(-pts[:, 1], pts[:, 0], s=0.01, c=pts[:, -1], cmap='gray')\n\n    # Draw BEV GT boxes\n    for corners in gt_corners:\n        lines = get_bev_lines(corners)\n        for line in lines:\n            plt.plot([-x for x in line[1]],\n                     line[0],\n                     c='r',\n                     label='ground truth')\n\n    # Draw BEV predictions\n    for corners in pred_corners:\n        lines = get_bev_lines(corners)\n        for line in lines:\n            plt.plot([-x for x in line[1]], line[0], c='g', label='prediction')\n\n    # Set legend\n    handles, labels = plt.gca().get_legend_handles_labels()\n    by_label = dict(zip(labels, handles))\n    plt.legend(by_label.values(),\n               by_label.keys(),\n               loc='upper right',\n               framealpha=1)\n\n    # Save figure\n    plt.tight_layout(w_pad=0, h_pad=2)\n    plt.savefig(dump_file)\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    demo(\n        args.idx,\n        args.result_path,\n        args.target_path,\n    )\n"
  },
  {
    "path": "setup.py",
    "content": "import os\n\nimport torch\nfrom setuptools import find_packages, setup\nfrom torch.utils.cpp_extension import (BuildExtension, CppExtension,\n                                       CUDAExtension)\n\nwith open('README.md', 'r') as fh:\n    long_description = fh.read()\n\n\ndef make_cuda_ext(name,\n                  module,\n                  sources,\n                  sources_cuda=[],\n                  extra_args=[],\n                  extra_include_path=[]):\n\n    define_macros = []\n    extra_compile_args = {'cxx': [] + extra_args}\n\n    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':\n        define_macros += [('WITH_CUDA', None)]\n        extension = CUDAExtension\n        extra_compile_args['nvcc'] = extra_args + [\n            '-D__CUDA_NO_HALF_OPERATORS__',\n            '-D__CUDA_NO_HALF_CONVERSIONS__',\n            '-D__CUDA_NO_HALF2_OPERATORS__',\n        ]\n        sources += sources_cuda\n    else:\n        print('Compiling {} without CUDA'.format(name))\n        extension = CppExtension\n        # raise EnvironmentError('CUDA is required to compile MMDetection!')\n\n    return extension(\n        name='{}.{}'.format(module, name),\n        sources=[os.path.join(*module.split('.'), p) for p in sources],\n        include_dirs=extra_include_path,\n        define_macros=define_macros,\n        extra_compile_args=extra_compile_args,\n    )\n\n\nsetup(\n    name='BEVDepth',\n    version='0.0.1',\n    author='Megvii',\n    author_email='liyinhao@megvii.com',\n    description='Code for BEVDepth',\n    long_description=long_description,\n    long_description_content_type='text/markdown',\n    url=None,\n    packages=find_packages(),\n    classifiers=[\n        'Programming Language :: Python :: 3',\n        'Operating System :: OS Independent',\n    ],\n    install_requires=[],\n    ext_modules=[\n        make_cuda_ext(\n            name='voxel_pooling_train_ext',\n            module='bevdepth.ops.voxel_pooling_train',\n            sources=['src/voxel_pooling_train_forward.cpp'],\n            sources_cuda=['src/voxel_pooling_train_forward_cuda.cu'],\n        ),\n        make_cuda_ext(\n            name='voxel_pooling_inference_ext',\n            module='bevdepth.ops.voxel_pooling_inference',\n            sources=['src/voxel_pooling_inference_forward.cpp'],\n            sources_cuda=['src/voxel_pooling_inference_forward_cuda.cu'],\n        ),\n    ],\n    cmdclass={'build_ext': BuildExtension},\n)\n"
  },
  {
    "path": "test/test_dataset/test_nusc_mv_det_dataset.py",
    "content": "import unittest\n\nimport numpy as np\nimport torch\n\nfrom bevdepth.datasets.nusc_det_dataset import NuscDetDataset\n\nCLASSES = [\n    'car',\n    'truck',\n    'construction_vehicle',\n    'bus',\n    'trailer',\n    'barrier',\n    'motorcycle',\n    'bicycle',\n    'pedestrian',\n    'traffic_cone',\n]\nH = 900\nW = 1600\nfinal_dim = (256, 704)\nimg_conf = dict(img_mean=[123.675, 116.28, 103.53],\n                img_std=[58.395, 57.12, 57.375],\n                to_rgb=True)\nida_aug_conf = {\n    'resize_lim': (0.4, 0.4),\n    'final_dim':\n    final_dim,\n    'rot_lim': (0, 0),\n    'H':\n    H,\n    'W':\n    W,\n    'rand_flip':\n    True,\n    'bot_pct_lim': (0.0, 0.0),\n    'cams': [\n        'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT',\n        'CAM_BACK', 'CAM_BACK_RIGHT'\n    ],\n    'Ncams':\n    6,\n}\n\nbda_aug_conf = {\n    'rot_lim': (0, 0),\n    'scale_lim': (1, 1),\n    'flip_dx_ratio': 0,\n    'flip_dy_ratio': 0\n}\n\n\nclass TestNuscMVDetData(unittest.TestCase):\n\n    def test_voxel_pooling(self):\n        np.random.seed(0)\n        torch.random.manual_seed(0)\n        nusc = NuscDetDataset(ida_aug_conf,\n                              bda_aug_conf,\n                              CLASSES,\n                              './test/data/nuscenes',\n                              './test/data/nuscenes/infos.pkl',\n                              True,\n                              sweep_idxes=[4])\n        ret_list = nusc[0]\n        assert torch.isclose(ret_list[0].mean(),\n                             torch.tensor(-0.4667),\n                             rtol=1e-3)\n        assert torch.isclose(ret_list[1].mean(),\n                             torch.tensor(0.1678),\n                             rtol=1e-3)\n        assert torch.isclose(ret_list[2].mean(),\n                             torch.tensor(230.0464),\n                             rtol=1e-3)\n        assert torch.isclose(ret_list[3].mean(),\n                             torch.tensor(8.3250),\n                             rtol=1e-3)\n        assert torch.isclose(ret_list[4].mean(), torch.tensor(0.25), rtol=1e-3)\n        assert torch.isclose(ret_list[5].mean(), torch.tensor(0.25), rtol=1e-3)\n"
  },
  {
    "path": "test/test_layers/test_backbone.py",
    "content": "import unittest\n\nimport pytest\nimport torch\n\nfrom bevdepth.layers.backbones.base_lss_fpn import BaseLSSFPN\n\n\nclass TestLSSFPN(unittest.TestCase):\n\n    def setUp(self) -> None:\n        backbone_conf = {\n            'x_bound': [-10, 10, 0.5],\n            'y_bound': [-10, 10, 0.5],\n            'z_bound': [-5, 3, 8],\n            'd_bound': [2.0, 22, 1.0],\n            'final_dim': [64, 64],\n            'output_channels':\n            10,\n            'downsample_factor':\n            16,\n            'img_backbone_conf':\n            dict(type='ResNet',\n                 depth=18,\n                 frozen_stages=0,\n                 out_indices=[0, 1, 2, 3],\n                 norm_eval=False,\n                 base_channels=8),\n            'img_neck_conf':\n            dict(\n                type='SECONDFPN',\n                in_channels=[8, 16, 32, 64],\n                upsample_strides=[0.25, 0.5, 1, 2],\n                out_channels=[16, 16, 16, 16],\n            ),\n            'depth_net_conf':\n            dict(in_channels=64, mid_channels=64),\n        }\n        self.lss_fpn = BaseLSSFPN(**backbone_conf).cuda()\n\n    @pytest.mark.skipif(torch.cuda.is_available() is False,\n                        reason='No gpu available.')\n    def test_forward(self):\n        sweep_imgs = torch.rand(2, 2, 6, 3, 64, 64).cuda()\n        sensor2ego_mats = torch.rand(2, 2, 6, 4, 4).cuda()\n        intrin_mats = torch.rand(2, 2, 6, 4, 4).cuda()\n        ida_mats = torch.rand(2, 2, 6, 4, 4).cuda()\n        sensor2sensor_mats = torch.rand(2, 2, 6, 4, 4).cuda()\n        bda_mat = torch.rand(2, 4, 4).cuda()\n        mats_dict = dict()\n        mats_dict['sensor2ego_mats'] = sensor2ego_mats\n        mats_dict['intrin_mats'] = intrin_mats\n        mats_dict['ida_mats'] = ida_mats\n        mats_dict['sensor2sensor_mats'] = sensor2sensor_mats\n        mats_dict['bda_mat'] = bda_mat\n        preds = self.lss_fpn.forward(sweep_imgs, mats_dict)\n        assert preds.shape == torch.Size([2, 20, 40, 40])\n"
  },
  {
    "path": "test/test_layers/test_head.py",
    "content": "import unittest\n\nimport pytest\nimport torch\nfrom mmdet3d.core.bbox.structures.lidar_box3d import LiDARInstance3DBoxes\n\nfrom bevdepth.layers.heads.bev_depth_head import BEVDepthHead\n\n\nclass TestLSSFPN(unittest.TestCase):\n\n    def setUp(self) -> None:\n        bev_backbone = dict(\n            type='ResNet',\n            in_channels=10,\n            depth=18,\n            num_stages=3,\n            strides=(1, 2, 2),\n            dilations=(1, 1, 1),\n            out_indices=[0, 1, 2],\n            norm_eval=False,\n            base_channels=20,\n        )\n\n        bev_neck = dict(type='SECONDFPN',\n                        in_channels=[10, 20, 40, 80],\n                        upsample_strides=[1, 2, 4, 8],\n                        out_channels=[8, 8, 8, 8])\n\n        TASKS = [\n            dict(num_class=1, class_names=['car']),\n            dict(num_class=2, class_names=['truck', 'construction_vehicle']),\n            dict(num_class=2, class_names=['bus', 'trailer']),\n            dict(num_class=1, class_names=['barrier']),\n            dict(num_class=2, class_names=['motorcycle', 'bicycle']),\n            dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),\n        ]\n\n        common_heads = dict(reg=(2, 2),\n                            height=(1, 2),\n                            dim=(3, 2),\n                            rot=(2, 2),\n                            vel=(2, 2))\n\n        bbox_coder = dict(\n            type='CenterPointBBoxCoder',\n            post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],\n            max_num=500,\n            score_threshold=0.1,\n            out_size_factor=32,\n            voxel_size=[0.2, 0.2, 8],\n            pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],\n            code_size=9,\n        )\n\n        train_cfg = dict(\n            point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],\n            grid_size=[512, 512, 1],\n            voxel_size=[0.2, 0.2, 8],\n            out_size_factor=32,\n            dense_reg=1,\n            gaussian_overlap=0.1,\n            max_objs=500,\n            min_radius=2,\n            code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5],\n        )\n\n        test_cfg = dict(\n            post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],\n            max_per_img=500,\n            max_pool_nms=False,\n            min_radius=[4, 12, 10, 1, 0.85, 0.175],\n            score_threshold=0.1,\n            out_size_factor=4,\n            voxel_size=[0.2, 0.2, 8],\n            nms_type='circle',\n            pre_max_size=1000,\n            post_max_size=83,\n            nms_thr=0.2,\n        )\n\n        head_conf = {\n            'bev_backbone_conf': bev_backbone,\n            'bev_neck_conf': bev_neck,\n            'tasks': TASKS,\n            'common_heads': common_heads,\n            'bbox_coder': bbox_coder,\n            'train_cfg': train_cfg,\n            'test_cfg': test_cfg,\n            'in_channels': 32,  # Equal to bev_neck output_channels.\n            'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'),\n            'loss_bbox': dict(type='L1Loss',\n                              reduction='mean',\n                              loss_weight=0.25),\n            'gaussian_overlap': 0.1,\n            'min_radius': 2,\n        }\n        self.bevdet_head = BEVDepthHead(**head_conf).cuda()\n\n    @pytest.mark.skipif(torch.cuda.is_available() is False,\n                        reason='No gpu available.')\n    def test_forward(self):\n       
 x = torch.rand(2, 10, 32, 32).cuda()\n        ret_results = self.bevdet_head.forward(x)\n        assert len(ret_results) == 6\n        assert ret_results[0][0]['reg'].shape == torch.Size([2, 2, 32, 32])\n        assert ret_results[0][0]['height'].shape == torch.Size([2, 1, 32, 32])\n        assert ret_results[0][0]['dim'].shape == torch.Size([2, 3, 32, 32])\n        assert ret_results[0][0]['rot'].shape == torch.Size([2, 2, 32, 32])\n        assert ret_results[0][0]['vel'].shape == torch.Size([2, 2, 32, 32])\n        assert ret_results[0][0]['heatmap'].shape == torch.Size([2, 1, 32, 32])\n\n    @pytest.mark.skipif(torch.cuda.is_available() is False,\n                        reason='No gpu available.')\n    def test_get_targets(self):\n        gt_boxes_3d_0 = torch.rand(10, 9).cuda()\n        gt_boxes_3d_1 = torch.rand(15, 9).cuda()\n        gt_boxes_3d_0[:, :2] *= 10\n        gt_boxes_3d_1[:, :2] *= 10\n        gt_labels_3d_0 = torch.randint(0, 10, (10, )).cuda()\n        gt_labels_3d_1 = torch.randint(0, 10, (15, )).cuda()\n        gt_boxes_3d = [gt_boxes_3d_0, gt_boxes_3d_1]\n        gt_labels_3d = [gt_labels_3d_0, gt_labels_3d_1]\n        heatmaps, anno_boxes, inds, masks = self.bevdet_head.get_targets(\n            gt_boxes_3d, gt_labels_3d)\n        assert len(heatmaps) == 6\n        assert len(anno_boxes) == 6\n        assert len(inds) == 6\n        assert len(masks) == 6\n        assert heatmaps[0].shape == torch.Size([2, 1, 16, 16])\n        assert anno_boxes[0].shape == torch.Size([2, 500, 10])\n        assert inds[0].shape == torch.Size([2, 500])\n        assert masks[0].shape == torch.Size([2, 500])\n\n    @pytest.mark.skipif(torch.cuda.is_available() is False,\n                        reason='No gpu available.')\n    def test_get_bboxes(self):\n        x = torch.rand(2, 10, 32, 32).cuda()\n        ret_results = self.bevdet_head.forward(x)\n        img_metas = [\n            dict(box_type_3d=LiDARInstance3DBoxes),\n            dict(box_type_3d=LiDARInstance3DBoxes)\n        ]\n        pred_bboxes = self.bevdet_head.get_bboxes(ret_results,\n                                                  img_metas=img_metas)\n        assert len(pred_bboxes) == 2\n        assert len(pred_bboxes[0]) == 3\n        assert pred_bboxes[0][1].shape == torch.Size([498])\n        assert pred_bboxes[0][2].shape == torch.Size([498])\n"
  },
  {
    "path": "test/test_layers/test_matrixvt.py",
    "content": "import unittest\n\nimport torch\n\nfrom bevdepth.layers.backbones.matrixvt import MatrixVT\n\n\nclass TestMatrixVT(unittest.TestCase):\n\n    def setUp(self) -> None:\n        backbone_conf = {\n            'x_bound': [-10, 10, 0.5],\n            'y_bound': [-10, 10, 0.5],\n            'z_bound': [-5, 3, 8],\n            'd_bound': [2.0, 22, 1.0],\n            'final_dim': [64, 64],\n            'output_channels':\n            10,\n            'downsample_factor':\n            16,\n            'img_backbone_conf':\n            dict(\n                type='ResNet',\n                depth=18,\n                frozen_stages=0,\n                out_indices=[0, 1, 2, 3],\n                norm_eval=False,\n                base_channels=8,\n            ),\n            'img_neck_conf':\n            dict(\n                type='SECONDFPN',\n                in_channels=[8, 16, 32, 64],\n                upsample_strides=[0.25, 0.5, 1, 2],\n                out_channels=[16, 16, 16, 16],\n            ),\n            'depth_net_conf':\n            dict(in_channels=64, mid_channels=64),\n        }\n\n        model = MatrixVT(**backbone_conf)\n\n        return model\n\n    def test_forward(self):\n        model = self.setUp()\n        bev_feature, depth = model(\n            torch.rand((2, 1, 6, 3, 64, 64)),\n            {\n                'sensor2ego_mats': torch.rand((2, 1, 6, 4, 4)),\n                'intrin_mats': torch.rand((2, 1, 6, 4, 4)),\n                'ida_mats': torch.rand((2, 1, 6, 4, 4)),\n                'sensor2sensor_mats': torch.rand((2, 1, 6, 4, 4)),\n                'bda_mat': torch.rand((2, 4, 4)),\n            },\n            is_return_depth=True,\n        )\n        print(bev_feature.shape)\n        print(depth.shape)\n        assert bev_feature.shape == torch.Size([2, 10, 40, 40])\n        assert depth.shape == torch.Size([12, 20, 4, 4])\n"
  },
  {
    "path": "test/test_ops/test_voxel_pooling.py",
    "content": "import unittest\n\nimport pytest\nimport torch\n\nfrom bevdepth.ops.voxel_pooling_train import voxel_pooling_train\n\n\nclass TestLSSFPN(unittest.TestCase):\n\n    @pytest.mark.skipif(condition=torch.cuda.is_available() is False,\n                        reason='No gpu available.')\n    def test_voxel_pooling(self):\n        import numpy as np\n\n        np.random.seed(0)\n        torch.manual_seed(0)\n        geom_xyz = torch.rand([2, 6, 10, 10, 10, 3]) * 160 - 80\n        geom_xyz[..., 2] /= 100\n        geom_xyz = geom_xyz.reshape(2, -1, 3)\n        features = torch.rand([2, 6, 10, 10, 10, 80]) - 0.5\n        gt_features = features.reshape(2, -1, 80)\n        gt_bev_featuremap = features.new_zeros(2, 128, 128, 80)\n        for i in range(2):\n            for j in range(geom_xyz.shape[1]):\n                x = geom_xyz[i, j, 0].int()\n                y = geom_xyz[i, j, 1].int()\n                z = geom_xyz[i, j, 2].int()\n                if x < 0 or x >= 128 or y < 0 or y >= 128 or z < 0 or z >= 1:\n                    continue\n                gt_bev_featuremap[i, y, x, :] += gt_features[i, j, :]\n        gt_bev_featuremap = gt_bev_featuremap.permute(0, 3, 1, 2).cuda()\n        bev_featuremap = voxel_pooling_train(\n            geom_xyz.cuda().int(), features.cuda(),\n            torch.tensor([128, 128, 1], dtype=torch.int, device='cuda'))\n        assert torch.allclose(gt_bev_featuremap.cuda(),\n                              bev_featuremap,\n                              rtol=1e-3)\n"
  }
]