Copy disabled (too large)
Download .txt
Showing preview only (36,444K chars total). Download the full file to get everything.
Repository: G-1nOnly/Dens3R
Branch: main
Commit: a7349873e066
Files: 775
Total size: 126.6 MB
Directory structure:
gitextract_di20vn86/
├── AutoReconForDens3R/
│ ├── .gitattributes
│ ├── .github/
│ │ ├── ISSUE_TEMPLATE/
│ │ │ ├── bug-report.md
│ │ │ └── feature_request.md
│ │ └── workflows/
│ │ ├── core_code_checks.yml
│ │ ├── publish.yml
│ │ └── viewer_build_deploy.yml
│ ├── .gitignore
│ ├── .prettierrc.js
│ ├── LICENSE
│ ├── README.md
│ ├── colab/
│ │ └── demo.ipynb
│ ├── docs/
│ │ └── INSTALL.md
│ ├── exps/
│ │ └── code-release/
│ │ ├── bmvs/
│ │ │ ├── scan1.sh
│ │ │ ├── scan2.sh
│ │ │ ├── scan3.sh
│ │ │ ├── scan4.sh
│ │ │ ├── scan5.sh
│ │ │ └── scan6.sh
│ │ ├── co3d_demo/
│ │ │ ├── scan1.sh
│ │ │ ├── scan2.sh
│ │ │ ├── scan3.sh
│ │ │ ├── scan4.sh
│ │ │ └── scan5.sh
│ │ ├── run_dens3r_recon.sh
│ │ └── run_pipeline_demo_low-res.sh
│ ├── nerfstudio/
│ │ ├── __init__.py
│ │ ├── cameras/
│ │ │ ├── __init__.py
│ │ │ ├── camera_optimizers.py
│ │ │ ├── camera_paths.py
│ │ │ ├── camera_utils.py
│ │ │ ├── cameras.py
│ │ │ ├── lie_groups.py
│ │ │ └── rays.py
│ │ ├── configs/
│ │ │ ├── __init__.py
│ │ │ ├── autorecon/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── baseline.py
│ │ │ │ ├── common.py
│ │ │ │ ├── distilled_neusfacto.py
│ │ │ │ ├── feature_field.py
│ │ │ │ ├── neusfacto_fast.py
│ │ │ │ ├── regularization.py
│ │ │ │ └── semantic_nerf.py
│ │ │ ├── base_config.py
│ │ │ ├── config_utils.py
│ │ │ └── method_configs.py
│ │ ├── engine/
│ │ │ ├── __init__.py
│ │ │ ├── callbacks.py
│ │ │ ├── optimizers.py
│ │ │ ├── schedulers.py
│ │ │ └── trainer.py
│ │ ├── exporter/
│ │ │ ├── __init__.py
│ │ │ ├── exporter_utils.py
│ │ │ ├── mesh_culling_utils.py
│ │ │ ├── texture_utils.py
│ │ │ └── tsdf_utils.py
│ │ ├── field_components/
│ │ │ ├── __init__.py
│ │ │ ├── activations.py
│ │ │ ├── base_field_component.py
│ │ │ ├── embedding.py
│ │ │ ├── encodings.py
│ │ │ ├── field_heads.py
│ │ │ ├── mlp.py
│ │ │ ├── progressive_encoding.py
│ │ │ ├── spatial_distortions.py
│ │ │ └── temporal_distortions.py
│ │ ├── fields/
│ │ │ ├── __init__.py
│ │ │ ├── base_field.py
│ │ │ ├── density_fields.py
│ │ │ ├── feature_field.py
│ │ │ ├── instant_ngp_field.py
│ │ │ ├── nerfacto_field.py
│ │ │ ├── nerfw_field.py
│ │ │ ├── sdf_field.py
│ │ │ ├── semantic_nerf_field.py
│ │ │ ├── tensorf_field.py
│ │ │ └── vanilla_nerf_field.py
│ │ ├── model_components/
│ │ │ ├── __init__.py
│ │ │ ├── losses.py
│ │ │ ├── patch_warping.py
│ │ │ ├── ray_generators.py
│ │ │ ├── ray_samplers.py
│ │ │ ├── renderers.py
│ │ │ └── scene_colliders.py
│ │ ├── models/
│ │ │ ├── __init__.py
│ │ │ ├── base_model.py
│ │ │ ├── base_surface_model.py
│ │ │ ├── distilled_neus_facto.py
│ │ │ ├── dto.py
│ │ │ ├── instant_ngp.py
│ │ │ ├── mipnerf.py
│ │ │ ├── monosdf.py
│ │ │ ├── nerfacto.py
│ │ │ ├── neuralreconW.py
│ │ │ ├── neus.py
│ │ │ ├── neus_acc.py
│ │ │ ├── neus_facto.py
│ │ │ ├── neus_facto_dff.py
│ │ │ ├── neus_facto_reg.py
│ │ │ ├── semantic_nerfw.py
│ │ │ ├── tensorf.py
│ │ │ ├── unisurf.py
│ │ │ ├── vanilla_nerf.py
│ │ │ └── volsdf.py
│ │ ├── pipelines/
│ │ │ ├── __init__.py
│ │ │ ├── base_pipeline.py
│ │ │ └── dynamic_batch.py
│ │ ├── process_data/
│ │ │ ├── __init__.py
│ │ │ ├── colmap_utils.py
│ │ │ ├── hloc_utils.py
│ │ │ ├── insta360_utils.py
│ │ │ ├── metashape_utils.py
│ │ │ ├── polycam_utils.py
│ │ │ ├── process_data_utils.py
│ │ │ └── record3d_utils.py
│ │ ├── py.typed
│ │ ├── utils/
│ │ │ ├── __init__.py
│ │ │ ├── bilateral_solver.py
│ │ │ ├── colormaps.py
│ │ │ ├── colors.py
│ │ │ ├── comms.py
│ │ │ ├── decorators.py
│ │ │ ├── eval_utils.py
│ │ │ ├── func_utils.py
│ │ │ ├── images.py
│ │ │ ├── install_checks.py
│ │ │ ├── io.py
│ │ │ ├── marching_cubes.py
│ │ │ ├── mask_utils.py
│ │ │ ├── math.py
│ │ │ ├── misc.py
│ │ │ ├── plotly_utils.py
│ │ │ ├── pointclouds.py
│ │ │ ├── poses.py
│ │ │ ├── printing.py
│ │ │ ├── profiler.py
│ │ │ ├── rich_utils.py
│ │ │ ├── scheduler.py
│ │ │ ├── scripts.py
│ │ │ ├── tensor_dataclass.py
│ │ │ ├── vis_utils.py
│ │ │ └── writer.py
│ │ └── viewer/
│ │ ├── __init__.py
│ │ ├── app/
│ │ │ ├── .eslintrc.json
│ │ │ ├── .gitignore
│ │ │ ├── package.json
│ │ │ ├── public/
│ │ │ │ ├── electron.js
│ │ │ │ ├── index.html
│ │ │ │ ├── manifest.json
│ │ │ │ └── robots.txt
│ │ │ ├── requirements.txt
│ │ │ ├── run_deploy.py
│ │ │ └── src/
│ │ │ ├── App.jsx
│ │ │ ├── SceneNode.js
│ │ │ ├── index.jsx
│ │ │ ├── index.scss
│ │ │ ├── modules/
│ │ │ │ ├── Banner/
│ │ │ │ │ ├── Banner.jsx
│ │ │ │ │ └── index.jsx
│ │ │ │ ├── ConfigPanel/
│ │ │ │ │ ├── ConfigPanel.jsx
│ │ │ │ │ └── ConfigPanelSlice.js
│ │ │ │ ├── LandingModal/
│ │ │ │ │ ├── LandingModal.jsx
│ │ │ │ │ └── index.jsx
│ │ │ │ ├── LogPanel/
│ │ │ │ │ └── LogPanel.jsx
│ │ │ │ ├── RenderModal/
│ │ │ │ │ ├── RenderModal.jsx
│ │ │ │ │ └── index.jsx
│ │ │ │ ├── Scene/
│ │ │ │ │ ├── Scene.jsx
│ │ │ │ │ └── drawing.js
│ │ │ │ ├── SidePanel/
│ │ │ │ │ ├── CameraPanel/
│ │ │ │ │ │ ├── CameraHelper.js
│ │ │ │ │ │ ├── CameraPanel.jsx
│ │ │ │ │ │ ├── curve.js
│ │ │ │ │ │ └── index.jsx
│ │ │ │ │ ├── ExportPanel/
│ │ │ │ │ │ ├── ExportPanel.jsx
│ │ │ │ │ │ ├── MeshSubPanel.jsx
│ │ │ │ │ │ ├── PointcloudSubPanel.jsx
│ │ │ │ │ │ └── index.jsx
│ │ │ │ │ ├── ScenePanel/
│ │ │ │ │ │ ├── ScenePanel.jsx
│ │ │ │ │ │ └── index.jsx
│ │ │ │ │ ├── SidePanel.jsx
│ │ │ │ │ └── StatusPanel/
│ │ │ │ │ ├── StatusPanel.jsx
│ │ │ │ │ └── index.jsx
│ │ │ │ ├── ViewerWindow/
│ │ │ │ │ ├── ViewerWindow.jsx
│ │ │ │ │ └── ViewerWindowSlice.js
│ │ │ │ ├── ViewportControlsModal/
│ │ │ │ │ ├── ViewportControlsModal.jsx
│ │ │ │ │ └── index.jsx
│ │ │ │ ├── WebRtcWindow/
│ │ │ │ │ └── WebRtcWindow.jsx
│ │ │ │ ├── WebSocket/
│ │ │ │ │ └── WebSocket.jsx
│ │ │ │ └── WebSocketUrlField.jsx
│ │ │ ├── reducer.js
│ │ │ ├── setupTests.js
│ │ │ ├── store.js
│ │ │ ├── subscriber.js
│ │ │ ├── themes/
│ │ │ │ ├── leva_theme.json
│ │ │ │ └── theme.ts
│ │ │ └── utils.js
│ │ └── server/
│ │ ├── README.md
│ │ ├── __init__.py
│ │ ├── path.py
│ │ ├── server.py
│ │ ├── state/
│ │ │ ├── node.py
│ │ │ └── state_node.py
│ │ ├── subprocess.py
│ │ ├── utils.py
│ │ ├── video_stream.py
│ │ ├── viewer_utils.py
│ │ └── visualizer.py
│ ├── pyproject.toml
│ ├── scripts/
│ │ ├── __init__.py
│ │ ├── benchmarking/
│ │ │ ├── launch_eval_blender.sh
│ │ │ └── launch_train_blender.sh
│ │ ├── blender/
│ │ │ ├── render_mesh_blender.py
│ │ │ └── render_pointcloud_blender.py
│ │ ├── completions/
│ │ │ ├── .gitignore
│ │ │ ├── __init__.py
│ │ │ ├── install.py
│ │ │ ├── setup.bash
│ │ │ └── setup.zsh
│ │ ├── datasets/
│ │ │ ├── extract_monocular_cues.py
│ │ │ ├── process_nerfstudio_to_sdfstudio.py
│ │ │ ├── process_neuralrgbd_to_sdfstudio.py
│ │ │ ├── process_nuscenes_masks.py
│ │ │ └── process_scannet_to_sdfstudio.py
│ │ ├── docs/
│ │ │ ├── __init__.py
│ │ │ ├── add_nb_tags.py
│ │ │ └── build_docs.py
│ │ ├── eval.py
│ │ ├── eval_mask.py
│ │ ├── exporter.py
│ │ ├── extract_mesh.py
│ │ ├── extract_volume.py
│ │ ├── generate_kitti360_trainsplit.py
│ │ ├── github/
│ │ │ ├── __init__.py
│ │ │ └── run_actions.py
│ │ ├── heritage_to_nerfstudio.py
│ │ ├── licensing/
│ │ │ ├── copyright.txt
│ │ │ └── license_headers.sh
│ │ ├── preprocess/
│ │ │ └── preprocess_neus_pose.py
│ │ ├── process_data.py
│ │ ├── render.json
│ │ ├── render.py
│ │ ├── render_mesh.py
│ │ ├── texture.py
│ │ ├── train.py
│ │ └── viewer/
│ │ └── view_dataset.py
│ ├── setup.cfg
│ ├── tests/
│ │ ├── cameras/
│ │ │ ├── test_cameras.py
│ │ │ └── test_rays.py
│ │ ├── field_components/
│ │ │ ├── test_embedding.py
│ │ │ ├── test_encodings.py
│ │ │ ├── test_field_outputs.py
│ │ │ ├── test_fields.py
│ │ │ ├── test_mlp.py
│ │ │ └── test_temporal_distortions.py
│ │ ├── model_components/
│ │ │ ├── test_ray_sampler.py
│ │ │ └── test_renderers.py
│ │ ├── test_train.py
│ │ └── utils/
│ │ ├── test_poses.py
│ │ ├── test_tensor_dataclass.py
│ │ └── test_visualization.py
│ └── third_party/
│ └── AutoDecomp/
│ ├── .gitignore
│ ├── LICENSE
│ ├── README.md
│ ├── auto_decomp/
│ │ ├── __init__.py
│ │ ├── cli/
│ │ │ └── inference_transformer.py
│ │ ├── configs/
│ │ │ └── inference_transformer/
│ │ │ ├── config.yaml
│ │ │ ├── cvpr.yaml
│ │ │ ├── cvpr_idr.yaml
│ │ │ ├── idr.yaml
│ │ │ ├── low-res.yaml
│ │ │ └── low-res_idr.yaml
│ │ ├── decomp/
│ │ │ ├── preprocess.py
│ │ │ └── transformer/
│ │ │ ├── __init__.py
│ │ │ ├── dataset/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── ncut.py
│ │ │ │ └── utils.py
│ │ │ ├── lightning/
│ │ │ │ └── module.py
│ │ │ ├── modeling/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── encoding.py
│ │ │ │ ├── point_transformer.py
│ │ │ │ ├── utils.py
│ │ │ │ └── vision_transformer.py
│ │ │ └── utils/
│ │ │ ├── postprocess.py
│ │ │ └── saving.py
│ │ ├── feature_extraction/
│ │ │ ├── __init__.py
│ │ │ └── dino_vit/
│ │ │ ├── __init__.py
│ │ │ ├── extract_features.py
│ │ │ └── vit_extractor.py
│ │ ├── sfm/
│ │ │ ├── __init__.py
│ │ │ ├── colmap_from_co3d.py
│ │ │ ├── colmap_from_idr.py
│ │ │ ├── enums.py
│ │ │ ├── pairs_from_sequential.py
│ │ │ └── sfm.py
│ │ └── utils/
│ │ ├── __init__.py
│ │ ├── aggregation.py
│ │ ├── cli.py
│ │ ├── colmap.py
│ │ ├── geometry/
│ │ │ ├── __init__.py
│ │ │ ├── box3d.py
│ │ │ ├── pointcloud/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── convert.py
│ │ │ │ ├── misc.py
│ │ │ │ ├── plane.py
│ │ │ │ ├── pointcloud.py
│ │ │ │ └── segmentation.py
│ │ │ └── transform.py
│ │ ├── misc.py
│ │ ├── ray.py
│ │ ├── tqdm.py
│ │ └── viz_3d.py
│ ├── ckpts/
│ │ └── no-chair.ckpt
│ ├── docs/
│ │ └── INSTALL.md
│ ├── requirements.txt
│ ├── scripts/
│ │ ├── run_pipeline_demo.sh
│ │ ├── run_pipeline_demo_low-res.sh
│ │ ├── test_pipeline_bmvs/
│ │ │ ├── bmvs_scan1.sh
│ │ │ ├── bmvs_scan2.sh
│ │ │ ├── bmvs_scan3.sh
│ │ │ ├── bmvs_scan4.sh
│ │ │ ├── bmvs_scan5.sh
│ │ │ ├── bmvs_scan6.sh
│ │ │ ├── cvpr/
│ │ │ │ ├── README.md
│ │ │ │ ├── bmvs_scan1.sh
│ │ │ │ ├── bmvs_scan2.sh
│ │ │ │ ├── bmvs_scan3.sh
│ │ │ │ ├── bmvs_scan4.sh
│ │ │ │ ├── bmvs_scan5.sh
│ │ │ │ └── bmvs_scan6.sh
│ │ │ └── low-res/
│ │ │ ├── README.md
│ │ │ └── bmvs_scan1.sh
│ │ └── test_pipeline_co3d_manual-poses/
│ │ ├── co3d_scan1.sh
│ │ ├── co3d_scan2.sh
│ │ ├── co3d_scan3.sh
│ │ ├── co3d_scan4.sh
│ │ ├── co3d_scan5.sh
│ │ └── cvpr/
│ │ ├── co3d_scan1.sh
│ │ ├── co3d_scan2.sh
│ │ ├── co3d_scan3.sh
│ │ ├── co3d_scan4.sh
│ │ └── co3d_scan5.sh
│ ├── setup.py
│ └── third_party/
│ ├── Hierarchical-Localization/
│ │ ├── .gitattributes
│ │ ├── .gitignore
│ │ ├── .gitmodules
│ │ ├── Dockerfile
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── datasets/
│ │ │ └── sacre_coeur/
│ │ │ └── README.md
│ │ ├── demo.ipynb
│ │ ├── hloc/
│ │ │ ├── __init__.py
│ │ │ ├── colmap_from_nvm.py
│ │ │ ├── extract_features.py
│ │ │ ├── extractors/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── cosplace.py
│ │ │ │ ├── d2net.py
│ │ │ │ ├── dir.py
│ │ │ │ ├── disk.py
│ │ │ │ ├── dog.py
│ │ │ │ ├── netvlad.py
│ │ │ │ ├── openibl.py
│ │ │ │ ├── r2d2.py
│ │ │ │ └── superpoint.py
│ │ │ ├── localize_inloc.py
│ │ │ ├── localize_sfm.py
│ │ │ ├── match_dense.py
│ │ │ ├── match_features.py
│ │ │ ├── matchers/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── adalam.py
│ │ │ │ ├── loftr.py
│ │ │ │ ├── nearest_neighbor.py
│ │ │ │ └── superglue.py
│ │ │ ├── pairs_from_covisibility.py
│ │ │ ├── pairs_from_exhaustive.py
│ │ │ ├── pairs_from_poses.py
│ │ │ ├── pairs_from_retrieval.py
│ │ │ ├── pipelines/
│ │ │ │ ├── 4Seasons/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── localize.py
│ │ │ │ │ ├── prepare_reference.py
│ │ │ │ │ └── utils.py
│ │ │ │ ├── 7Scenes/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── create_gt_sfm.py
│ │ │ │ │ ├── pipeline.py
│ │ │ │ │ └── utils.py
│ │ │ │ ├── Aachen/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── pipeline.py
│ │ │ │ ├── Aachen_v1_1/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── pipeline.py
│ │ │ │ │ └── pipeline_loftr.py
│ │ │ │ ├── CMU/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── pipeline.py
│ │ │ │ ├── Cambridge/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── pipeline.py
│ │ │ │ │ └── utils.py
│ │ │ │ ├── RobotCar/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── colmap_from_nvm.py
│ │ │ │ │ └── pipeline.py
│ │ │ │ └── __init__.py
│ │ │ ├── reconstruction.py
│ │ │ ├── triangulation.py
│ │ │ ├── utils/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── base_model.py
│ │ │ │ ├── database.py
│ │ │ │ ├── geometry.py
│ │ │ │ ├── io.py
│ │ │ │ ├── parsers.py
│ │ │ │ ├── read_write_model.py
│ │ │ │ ├── viz.py
│ │ │ │ └── viz_3d.py
│ │ │ └── visualization.py
│ │ ├── pairs/
│ │ │ ├── aachen/
│ │ │ │ ├── pairs-db-covis20.txt
│ │ │ │ ├── pairs-query-netvlad20.txt
│ │ │ │ ├── pairs-query-netvlad30.txt
│ │ │ │ └── pairs-query-netvlad50.txt
│ │ │ ├── aachen_v1.1/
│ │ │ │ ├── pairs-db-covis20.txt
│ │ │ │ └── pairs-query-netvlad50.txt
│ │ │ └── inloc/
│ │ │ ├── pairs-query-netvlad20.txt
│ │ │ ├── pairs-query-netvlad30-temporal.txt
│ │ │ ├── pairs-query-netvlad30.txt
│ │ │ ├── pairs-query-netvlad40-temporal.txt
│ │ │ └── pairs-query-netvlad40.txt
│ │ ├── pipeline_Aachen.ipynb
│ │ ├── pipeline_InLoc.ipynb
│ │ ├── pipeline_SfM.ipynb
│ │ ├── requirements.txt
│ │ ├── setup.py
│ │ └── third_party/
│ │ ├── SuperGluePretrainedNetwork/
│ │ │ ├── .gitignore
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── assets/
│ │ │ │ ├── megadepth_train_scenes.txt
│ │ │ │ ├── megadepth_validation_scenes.txt
│ │ │ │ ├── phototourism_sample_pairs.txt
│ │ │ │ ├── phototourism_test_pairs.txt
│ │ │ │ ├── phototourism_test_pairs_original.txt
│ │ │ │ ├── scannet_sample_pairs_with_gt.txt
│ │ │ │ ├── scannet_test_pairs_with_gt.txt
│ │ │ │ ├── yfcc_test_pairs_with_gt.txt
│ │ │ │ └── yfcc_test_pairs_with_gt_original.txt
│ │ │ ├── demo_superglue.py
│ │ │ ├── match_pairs.py
│ │ │ ├── models/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── matching.py
│ │ │ │ ├── superglue.py
│ │ │ │ ├── superpoint.py
│ │ │ │ ├── utils.py
│ │ │ │ └── weights/
│ │ │ │ ├── superglue_indoor.pth
│ │ │ │ ├── superglue_outdoor.pth
│ │ │ │ └── superpoint_v1.pth
│ │ │ └── requirements.txt
│ │ ├── d2net/
│ │ │ ├── .gitignore
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── extract_features.py
│ │ │ ├── extract_hesaff.m
│ │ │ ├── hpatches_sequences/
│ │ │ │ ├── HPatches-Sequences-Matching-Benchmark.ipynb
│ │ │ │ ├── README.md
│ │ │ │ ├── convert_to_png.sh
│ │ │ │ ├── download.sh
│ │ │ │ └── download_cache.sh
│ │ │ ├── image_list_hpatches_sequences.txt
│ │ │ ├── image_list_qualitative.txt
│ │ │ ├── inloc/
│ │ │ │ ├── README.md
│ │ │ │ ├── custom_demo.m
│ │ │ │ ├── functions/
│ │ │ │ │ └── wustl_function/
│ │ │ │ │ ├── Features_WUSTL.m
│ │ │ │ │ └── parfor_sparseGV.m
│ │ │ │ ├── generate_list.m
│ │ │ │ └── merge_files.m
│ │ │ ├── megadepth_utils/
│ │ │ │ ├── preprocess_scene.py
│ │ │ │ ├── preprocess_undistorted_megadepth.sh
│ │ │ │ ├── train_scenes.txt
│ │ │ │ ├── undistort_reconstructions.py
│ │ │ │ └── valid_scenes.txt
│ │ │ ├── qualitative/
│ │ │ │ └── Qualitative-Matches.ipynb
│ │ │ └── train.py
│ │ ├── deep-image-retrieval/
│ │ │ ├── .gitignore
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ └── dirtorch/
│ │ │ ├── datasets/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── __main__.py
│ │ │ │ ├── create.py
│ │ │ │ ├── dataset.py
│ │ │ │ ├── downloader.py
│ │ │ │ ├── generic.py
│ │ │ │ ├── generic_func.py
│ │ │ │ ├── landmarks.py
│ │ │ │ ├── landmarks18.py
│ │ │ │ ├── oxford.py
│ │ │ │ └── paris.py
│ │ │ ├── extract_features.py
│ │ │ ├── extract_kapture.py
│ │ │ ├── loss.py
│ │ │ ├── nets/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── __main__.py
│ │ │ │ ├── backbones/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── resnet.py
│ │ │ │ │ └── resnext101_features.py
│ │ │ │ ├── layers/
│ │ │ │ │ └── pooling.py
│ │ │ │ ├── rmac_resnet.py
│ │ │ │ ├── rmac_resnet_fpn.py
│ │ │ │ └── rmac_resnext.py
│ │ │ ├── test_dir.py
│ │ │ └── utils/
│ │ │ ├── common.py
│ │ │ ├── convenient.py
│ │ │ ├── evaluation.py
│ │ │ ├── funcs.py
│ │ │ ├── pytorch_loader.py
│ │ │ ├── transforms.py
│ │ │ └── transforms_tools.py
│ │ ├── disk/
│ │ │ ├── .gitignore
│ │ │ ├── .gitmodules
│ │ │ ├── LICENSE.txt
│ │ │ ├── README.md
│ │ │ ├── colmap/
│ │ │ │ ├── colmap/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── database.py
│ │ │ │ │ ├── license.txt
│ │ │ │ │ ├── read_dense.py
│ │ │ │ │ └── read_model.py
│ │ │ │ ├── colmap2dataset.py
│ │ │ │ ├── h5_to_db.py
│ │ │ │ └── merge_datasets.py
│ │ │ ├── compute_validation_auc.py
│ │ │ ├── detect.py
│ │ │ ├── disk/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── common/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── errors.py
│ │ │ │ │ ├── image.py
│ │ │ │ │ ├── logger.py
│ │ │ │ │ ├── structs.py
│ │ │ │ │ └── vis.py
│ │ │ │ ├── geom/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── distance_matrix.py
│ │ │ │ │ ├── epi.py
│ │ │ │ │ └── pose.py
│ │ │ │ ├── loss/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── discrete_metric.py
│ │ │ │ │ ├── pose_metric.py
│ │ │ │ │ ├── ransac.py
│ │ │ │ │ ├── reinforce.py
│ │ │ │ │ └── rewards.py
│ │ │ │ └── model/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── consistent_matcher.py
│ │ │ │ ├── cycle_matcher.py
│ │ │ │ ├── detector.py
│ │ │ │ ├── disk.py
│ │ │ │ └── nms.py
│ │ │ ├── download_dataset
│ │ │ ├── match.py
│ │ │ ├── requirements.txt
│ │ │ ├── train.py
│ │ │ └── view_h5.py
│ │ └── r2d2/
│ │ ├── LICENSE
│ │ ├── NOTICE
│ │ ├── README.md
│ │ ├── datasets/
│ │ │ ├── __init__.py
│ │ │ ├── aachen.py
│ │ │ ├── dataset.py
│ │ │ ├── imgfolder.py
│ │ │ ├── pair_dataset.py
│ │ │ └── web_images.py
│ │ ├── download_training_data.sh
│ │ ├── extract.py
│ │ ├── extract_kapture.py
│ │ ├── models/
│ │ │ ├── r2d2_WAF_N16.pt
│ │ │ ├── r2d2_WASF_N16.pt
│ │ │ └── r2d2_WASF_N8_big.pt
│ │ ├── nets/
│ │ │ ├── ap_loss.py
│ │ │ ├── losses.py
│ │ │ ├── patchnet.py
│ │ │ ├── reliability_loss.py
│ │ │ ├── repeatability_loss.py
│ │ │ └── sampler.py
│ │ ├── results/
│ │ │ ├── r2d2_WAF_N16.scale-0.3-1.npy
│ │ │ ├── r2d2_WAF_N16.size-256-1024.npy
│ │ │ ├── r2d2_WASF_N16.scale-0.3-1.npy
│ │ │ ├── r2d2_WASF_N16.size-256-1024.npy
│ │ │ └── r2d2_W_N16.scale-0.3-1.npy
│ │ ├── tools/
│ │ │ ├── common.py
│ │ │ ├── dataloader.py
│ │ │ ├── trainer.py
│ │ │ ├── transforms.py
│ │ │ ├── transforms_tools.py
│ │ │ └── viz.py
│ │ ├── train.py
│ │ └── viz_heatmaps.py
│ └── LoFTR/
│ ├── .gitignore
│ ├── .gitmodules
│ ├── LICENSE
│ ├── README.md
│ ├── assets/
│ │ ├── megadepth_test_1500_scene_info/
│ │ │ ├── 0015_0.1_0.3.npz
│ │ │ ├── 0015_0.3_0.5.npz
│ │ │ ├── 0022_0.1_0.3.npz
│ │ │ ├── 0022_0.3_0.5.npz
│ │ │ ├── 0022_0.5_0.7.npz
│ │ │ └── megadepth_test_1500.txt
│ │ └── scannet_test_1500/
│ │ ├── intrinsics.npz
│ │ ├── scannet_test.txt
│ │ ├── statistics.json
│ │ └── test.npz
│ ├── configs/
│ │ └── loftr/
│ │ ├── indoor/
│ │ │ ├── buggy_pos_enc/
│ │ │ │ ├── loftr_ds.py
│ │ │ │ ├── loftr_ds_dense.py
│ │ │ │ ├── loftr_ot.py
│ │ │ │ └── loftr_ot_dense.py
│ │ │ ├── debug/
│ │ │ │ └── .gitignore
│ │ │ ├── loftr_ds.py
│ │ │ ├── loftr_ds_dense.py
│ │ │ ├── loftr_ot.py
│ │ │ ├── loftr_ot_dense.py
│ │ │ └── scannet/
│ │ │ ├── loftr_ds_eval.py
│ │ │ └── loftr_ds_eval_new.py
│ │ └── outdoor/
│ │ ├── buggy_pos_enc/
│ │ │ ├── loftr_ds.py
│ │ │ ├── loftr_ds_dense.py
│ │ │ ├── loftr_ot.py
│ │ │ └── loftr_ot_dense.py
│ │ ├── debug/
│ │ │ └── .gitignore
│ │ ├── loftr_ds.py
│ │ ├── loftr_ds_dense.py
│ │ ├── loftr_ot.py
│ │ └── loftr_ot_dense.py
│ ├── demo/
│ │ ├── demo_loftr.py
│ │ └── run_demo.sh
│ ├── docs/
│ │ └── TRAINING.md
│ ├── environment.yaml
│ ├── loftr/
│ │ ├── __init__.py
│ │ ├── config/
│ │ │ └── default.py
│ │ ├── datasets/
│ │ │ ├── megadepth.py
│ │ │ ├── sampler.py
│ │ │ └── scannet.py
│ │ ├── lightning/
│ │ │ ├── data.py
│ │ │ └── lightning_loftr.py
│ │ ├── loftr/
│ │ │ ├── __init__.py
│ │ │ ├── backbone/
│ │ │ │ ├── __init__.py
│ │ │ │ └── resnet_fpn.py
│ │ │ ├── loftr.py
│ │ │ ├── loftr_module/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── fine_preprocess.py
│ │ │ │ ├── linear_attention.py
│ │ │ │ └── transformer.py
│ │ │ └── utils/
│ │ │ ├── coarse_matching.py
│ │ │ ├── cvpr_ds_config.py
│ │ │ ├── fine_matching.py
│ │ │ ├── geometry.py
│ │ │ ├── position_encoding.py
│ │ │ └── supervision.py
│ │ ├── losses/
│ │ │ └── loftr_loss.py
│ │ ├── optimizers/
│ │ │ └── __init__.py
│ │ ├── tools/
│ │ │ └── hloc_match_features.py
│ │ └── utils/
│ │ ├── augment.py
│ │ ├── comm.py
│ │ ├── dataloader.py
│ │ ├── dataset.py
│ │ ├── metrics.py
│ │ ├── misc.py
│ │ ├── plotting.py
│ │ └── profiler.py
│ ├── requirements.txt
│ ├── scripts/
│ │ ├── reproduce_test/
│ │ │ ├── indoor_ds.sh
│ │ │ ├── indoor_ds_new.sh
│ │ │ ├── indoor_ot.sh
│ │ │ ├── outdoor_ds.sh
│ │ │ └── outdoor_ot.sh
│ │ └── reproduce_train/
│ │ ├── debug/
│ │ │ └── .gitignore
│ │ ├── indoor_ds.sh
│ │ ├── indoor_ot.sh
│ │ ├── outdoor_ds.sh
│ │ └── outdoor_ot.sh
│ ├── setup.py
│ ├── test.py
│ └── train.py
├── LICENSE
├── NOTICE
├── README.md
├── croco/
│ ├── .gitignore
│ ├── LICENSE
│ ├── NOTICE
│ ├── README.MD
│ ├── datasets/
│ │ ├── __init__.py
│ │ ├── crops/
│ │ │ ├── README.MD
│ │ │ └── extract_crops_from_images.py
│ │ ├── habitat_sim/
│ │ │ ├── README.MD
│ │ │ ├── __init__.py
│ │ │ ├── generate_from_metadata.py
│ │ │ ├── generate_from_metadata_files.py
│ │ │ ├── generate_multiview_images.py
│ │ │ ├── multiview_habitat_sim_generator.py
│ │ │ ├── pack_metadata_files.py
│ │ │ └── paths.py
│ │ ├── pairs_dataset.py
│ │ └── transforms.py
│ ├── models/
│ │ ├── blocks.py
│ │ ├── criterion.py
│ │ ├── croco.py
│ │ ├── croco_downstream.py
│ │ ├── curope/
│ │ │ ├── __init__.py
│ │ │ ├── curope.cpp
│ │ │ ├── curope2d.py
│ │ │ ├── kernels.cu
│ │ │ └── setup.py
│ │ ├── dpt_block.py
│ │ ├── head_downstream.py
│ │ ├── masking.py
│ │ └── pos_embed.py
│ ├── pretrain.py
│ ├── stereoflow/
│ │ ├── README.MD
│ │ ├── augmentor.py
│ │ ├── criterion.py
│ │ ├── datasets_flow.py
│ │ ├── datasets_stereo.py
│ │ ├── download_model.sh
│ │ ├── engine.py
│ │ ├── test.py
│ │ └── train.py
│ └── utils/
│ └── misc.py
├── dust3r/
│ ├── __init__.py
│ ├── datasets/
│ │ ├── __init__.py
│ │ ├── base/
│ │ │ ├── __init__.py
│ │ │ ├── base_stereo_view_dataset.py
│ │ │ ├── batched_sampler.py
│ │ │ ├── easy_dataset.py
│ │ │ └── mast3r_base_stereo_view_dataset.py
│ │ └── utils/
│ │ ├── __init__.py
│ │ ├── cropping.py
│ │ ├── mast3r_cropping.py
│ │ └── transforms.py
│ ├── heads/
│ │ ├── __init__.py
│ │ ├── dpt_head.py
│ │ ├── linear_head.py
│ │ └── postprocess.py
│ ├── image_pairs.py
│ ├── inference.py
│ ├── losses.py
│ ├── model.py
│ ├── optim_factory.py
│ ├── patch_embed.py
│ ├── post_process.py
│ ├── utils/
│ │ ├── __init__.py
│ │ ├── device.py
│ │ ├── geometry.py
│ │ ├── image.py
│ │ ├── misc.py
│ │ ├── parallel.py
│ │ ├── path_to_croco.py
│ │ └── read_write_model.py
│ └── viz.py
├── infer/
│ ├── demo_online.py
│ ├── demo_utils.py
│ ├── dens3r_recon.py
│ ├── eval_scripts/
│ │ ├── eval_depth.py
│ │ ├── eval_matching.py
│ │ ├── eval_normal.py
│ │ └── matching_metrics.py
│ └── infer_normal_pts3d.py
├── mast3r/
│ ├── .gitignore
│ ├── __init__.py
│ ├── catmlp_dpt_head.py
│ ├── colmap_utils/
│ │ ├── __init__.py
│ │ ├── database.py
│ │ └── database_utils.py
│ ├── fast_nn.py
│ ├── image_pairs.py
│ ├── losses.py
│ ├── model.py
│ ├── retrieval/
│ │ ├── graph.py
│ │ ├── model.py
│ │ └── processor.py
│ ├── ssim.py
│ └── utils/
│ ├── __init__.py
│ ├── coarse_to_fine.py
│ ├── collate.py
│ ├── misc.py
│ └── path_to_dust3r.py
└── requirements.txt
================================================
FILE CONTENTS
================================================
================================================
FILE: AutoReconForDens3R/.gitattributes
================================================
*.ipynb linguist-documentation
================================================
FILE: AutoReconForDens3R/.github/ISSUE_TEMPLATE/bug-report.md
================================================
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Additional context**
Add any other context about the problem here.
================================================
FILE: AutoReconForDens3R/.github/ISSUE_TEMPLATE/feature_request.md
================================================
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
================================================
FILE: AutoReconForDens3R/.github/workflows/core_code_checks.yml
================================================
# Lint + unit-test workflow: runs on every push / pull request targeting main.
name: Core Tests.

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

permissions:
  contents: read # read-only token: this workflow never writes to the repo

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python 3.7.13
        uses: actions/setup-python@v4
        with:
          python-version: '3.7.13'
      # Cache the whole Python installation, keyed by pyproject.toml, so the
      # dependency install below is fast when dependencies have not changed.
      # actions/cache@v2 ran on the removed node12 runtime and is deprecated;
      # v3 is a drop-in replacement with the same inputs.
      - uses: actions/cache@v3
        with:
          path: ${{ env.pythonLocation }}
          key: ${{ env.pythonLocation }}-${{ hashFiles('pyproject.toml') }}
      - name: Install dependencies
        run: |
          pip install --upgrade --upgrade-strategy eager -e .[dev]
      - name: Run license checks
        run: |
          ./scripts/licensing/license_headers.sh --check
      - name: Check notebook cell metadata
        run: |
          python ./scripts/docs/add_nb_tags.py --check
      - name: Run isort
        run: isort docs/ nerfstudio/ scripts/ tests/ --profile black --check
      - name: Run Black
        run: black docs/ nerfstudio/ scripts/ tests/ --check
      - name: Python Pylint
        run: |
          pylint nerfstudio tests scripts
      - name: Test with pytest
        run: |
          pytest
================================================
FILE: AutoReconForDens3R/.github/workflows/publish.yml
================================================
# This workflow will upload a Python Package using twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
name: Upload Python Package

on:
  release:
    types: [created]

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      # checkout@v2 / setup-python@v1 ran on the removed node12 runtime and
      # are deprecated; the v3 / v4 releases are drop-in replacements.
      - uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.8'
      - name: Install dependencies
        run: |
          python -m pip install build twine
      # PyPI renders only a subset of HTML; remove the README sections that the
      # pypi-strip markers delimit before building the distribution.
      - name: Strip unsupported tags in README
        run: |
          sed -i '/<!-- pypi-strip -->/,/<!-- \/pypi-strip -->/d' README.md
      - name: Build and publish
        env:
          # Pass credentials through twine's environment variables instead of
          # command-line flags so the token never appears in process listings
          # or step command echoes.
          TWINE_USERNAME: __token__
          TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
        run: |
          python -m build
          twine upload dist/*
================================================
FILE: AutoReconForDens3R/.github/workflows/viewer_build_deploy.yml
================================================
# Builds the web viewer frontend and deploys the production bundle via SSH.
name: Viewer Build and Deploy.

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./nerfstudio/viewer/app
    steps:
      - name: Checkout code
        # Pin to a released tag: @master is an unpinned moving target and the
        # action's master branch is not guaranteed to stay compatible.
        uses: actions/checkout@v3
      - name: Install Node.js
        uses: actions/setup-node@v3
        with:
          node-version: 17.8.0
          cache: 'yarn'
          cache-dependency-path: ./nerfstudio/viewer/app/yarn.lock
      - name: Install packages
        run: yarn install
      - name: Build project
        # CI=false keeps the build from treating lint warnings as errors.
        run: CI=false yarn build
      - name: Upload production-ready build files
        # upload/download-artifact v1 and v2 are deprecated and have been
        # disabled by GitHub; v3 is a drop-in replacement.
        uses: actions/upload-artifact@v3
        with:
          name: production-files
          path: ./nerfstudio/viewer/app/build

  deploy:
    name: Deploy
    needs: build
    runs-on: ubuntu-latest
    env:
      SSH_KEY: ${{secrets.SSH_KEY}}
    steps:
      - uses: actions/checkout@v3
      - name: Set up Python 3.8.12
        uses: actions/setup-python@v4
        with:
          python-version: '3.8.12'
      - name: Install dependencies
        run: |
          pip install -r ./nerfstudio/viewer/app/requirements.txt
      - name: Download artifact
        uses: actions/download-artifact@v3
        with:
          name: production-files
          path: ./nerfstudio/viewer/app/build
      - name: Get branch name (merge)
        if: github.event_name != 'pull_request'
        shell: bash
        run: echo "BRANCH_NAME=$(echo ${GITHUB_REF#refs/heads/} | tr / -)" >> $GITHUB_ENV
      - name: Get branch name (pull request)
        if: github.event_name == 'pull_request'
        shell: bash
        run: echo "BRANCH_NAME=$(echo ${GITHUB_HEAD_REF} | tr / -)" >> $GITHUB_ENV
      # TODO: detect file or scheme changes of the viewer and only
      # increment the version.txt file when there is a change.
      # Update the version.txt code and push to master when things change.
      # https://github.com/marketplace/actions/changed-files
      # - name: Run changed-files with defaults on the dir1
      #   id: changed-files-for-dir1
      #   uses: tj-actions/changed-files@v29.0.3
      #   with:
      #     path: nerfstudio/viewer/app
      # - name: List all added files in dir1
      #   run: |
      #     for file in ${{ steps.changed-files-for-dir1.outputs.modified_files }}; do
      #       echo "$file was modified"
      #     done
      - run: |
          python ./nerfstudio/viewer/app/run_deploy.py \
            --branch-name ${{ env.BRANCH_NAME }} \
            --ssh-key-string "$SSH_KEY" \
            --local-folder ./nerfstudio/viewer/app/build \
            --package-json-filename ./nerfstudio/viewer/app/package.json \
            --increment-version "False"
      - run: cat ~/.ssh/config
================================================
FILE: AutoReconForDens3R/.gitignore
================================================
*_debug_*
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
!scripts/downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.envrc
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
# Experiments and outputs
outputs/
exports/
renders/
outputs
vis/
meshes/
gradio_cached_examples/
# tensorboard log files
events.out.*
# Data
data
!*/data
# Misc
old/
temp*
!temporal*
.nfs*
external/
__MACOSX/
node_modules/
bash/
cache/
package-lock.json
camera_paths/
._.DS_Store
.DS_Store
.vscode/settings.json
vis_results/
W/
*.ply
notebooks/
media/
outputs/
/Dockerfile
/.readthedocs.yaml
vis_meshes/
README_*.md
================================================
FILE: AutoReconForDens3R/.prettierrc.js
================================================
/**
 * Prettier configuration for the project.
 * Option reference: https://prettier.io/docs/en/options.html
 */
const prettierOptions = {
  // Wrap single arrow-function parameters in parentheses.
  arrowParens: 'always',
  // Keep double quotes inside JSX attributes.
  jsxSingleQuote: false,
  // Prefer single quotes in plain JS/TS.
  singleQuote: true,
  // Emit trailing commas wherever the syntax allows.
  trailingComma: 'all',
};

module.exports = prettierOptions;
================================================
FILE: AutoReconForDens3R/LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: AutoReconForDens3R/README.md
================================================
# AutoRecon: Automated 3D Object Discovery and Reconstruction
### [Project Page](https://zju3dv.github.io/autorecon) | [Paper](https://zju3dv.github.io/autorecon/files/autorecon.pdf)

> [AutoRecon: Automated 3D Object Discovery and Reconstruction](https://zju3dv.github.io/autorecon/files/autorecon.pdf)
> Yuang Wang, Xingyi He, Sida Peng, Haotong Lin, Hujun Bao, Xiaowei Zhou
> CVPR 2023
# About
This is a refactored version of the [AutoRecon](https://zju3dv.github.io/autorecon) project based on the NeRFStudio and SDFStudio codebases. We separate the project into two parts. The coarse decomposition part is implemented in the [AutoDecomp](https://github.com/zju3dv/AutoDecomp) repo, which can be used as a general tool for 3D object discovery and for preprocessing casual captures for object reconstruction. The neural surface reconstruction part is implemented here.
# Installation
Please refer to the [installation guide](docs/INSTALL.md) for detailed instructions.
# Run the pipeline
## Run the pipeline with your own data
Here we use a demo dataset as an example. In this example, we assume only a sequential stream of images is available. You can easily adapt your own data to use AutoRecon.
1. Download the demo data from [Google Drive](https://drive.google.com/drive/folders/1IFbK9b7gzqwh9QkZe6zoLmcSFr_zW3rJ?usp=drive_link) and put it under `data` (-> `data/custom_data_example/...`)
2. Run the pipeline with `exps/code-release/run_pipeline_demo_low-res.sh`
> NOTE: In the demo script, we assume the images come from a sequential video stream and use sequential matching for SfM. If you have unordered images, the default setting might lead to inferior results and it is recommended to use exhaustive matching or vocab-tree instead.
## Run the pipeline with annotated data in the IDR format
1. Download the BlendedMVS data from [GoogleDrive](https://drive.google.com/drive/folders/1ZLQ0hap6o_Tjr7S6H_EAn17pz4_qFluW?usp=sharing) and put it under `data` (-> `data/BlendedMVS/...`)
2. Run one of the scripts in `exps/code-release/bmvs`
## Run the pipeline with CO3D data
1. Download the CO3D data from [GoogleDrive](https://drive.google.com/drive/folders/1u-ugNhwFVtV6TKZ2J29iwcdQY2iwVnoi?usp=sharing) and put it under `data` (-> `data/CO3D_DEMO/...`)
2. Run one of the scripts in `exps/code-release/co3d_demo`
# Extract Mesh
You can take the following script as a reference to extract mesh:
```bash
# Extract mesh with MC
LOG_DIR="path_to_log_dir"
MC_RES=512
MESH_FN="extracted_mesh_res-${MC_RES}.ply"
MESH_PATH="${LOG_DIR}/${MESH_FN}"
ns-extract-mesh \
--load-config $LOG_DIR/config.yml \
--load-dir $LOG_DIR/sdfstudio_models \
--output-path $MESH_PATH \
--chunk_size 25000 --store_float16 True \
--resolution $MC_RES \
--use_train_scene_box True \
--seg_aware_sdf False \
--remove_internal_geometry None \
--remove_non_maximum_connected_components True \
--close_holes False --simplify_mesh_final False
```
> NOTE: We postprocess the extracted mesh before evaluation by removing possible internal geometries with ambient occlusion. The postprocessing code depends on pymeshlab which only works on some machines. You can remove possible internal geometries manually with ambient occlusion using MeshLab.
# Citation
If you find this code useful for your research, please use the following BibTeX entry.
```bibtex
@inproceedings{wang2023autorecon,
title={AutoRecon: Automated 3D Object Discovery and Reconstruction},
author={Wang, Yuang and He, Xingyi and Peng, Sida and Lin, Haotong and Bao, Hujun and Zhou, Xiaowei},
booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
pages={21382--21391},
year={2023}
}
```
# Acknowledgement
This code is built upon the awesome projects including [nerfstudio](https://github.com/nerfstudio-project/nerfstudio/), [sdfstudio](https://github.com/autonomousvision/sdfstudio/blob/master/README.md), [nerfacc](https://github.com/KAIR-BAIR/nerfacc), [tyro](https://github.com/brentyi/tyro) and more. Thanks for these great projects!
================================================
FILE: AutoReconForDens3R/colab/demo.ipynb
================================================
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"colab_type": "text",
"id": "view-in-github"
},
"source": [
"<a href=\"https://colab.research.google.com/github/nerfstudio-project/nerfstudio/blob/tancik%2Fpolycam/colab/demo.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "SiiXJ7K_fePG"
},
"source": [
"<p align=\"center\">\n",
" <picture>\n",
" <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://docs.nerf.studio/en/latest/_images/logo-dark.png\">\n",
" <source media=\"(prefers-color-scheme: light)\" srcset=\"https://docs.nerf.studio/en/latest/_images/logo.png\">\n",
" <img alt=\"nerfstudio\" src=\"https://docs.nerf.studio/en/latest/_images/logo.png\" width=\"400\">\n",
" </picture>\n",
"</p>\n",
"\n",
"\n",
"# Nerfstudio: A collaboration friendly studio for NeRFs\n",
"\n",
"\n",
"\n",
"\n",
"This colab shows how to train and view NeRFs from Nerfstudio both on pre-made datasets or from your own videos/images.\n",
"\n",
"\\\\\n",
"\n",
"Credit to [NeX](https://nex-mpi.github.io/) for Google Colab format."
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Yyx5h6kz5ga7"
},
"source": [
"## Frequently Asked Questions\n",
"\n",
"* **Downloading custom data is stalling (no output):**\n",
" * This is a bug in Colab. The data is processing, but may take a while to complete. You will know processing completed if `data/nerfstudio/custom_data/transforms.json` exists. Terminating the cell early will result in not being able to train.\n",
"* **Processing custom data is taking a long time:**\n",
" * The time it takes to process data depends on the number of images and its resolution. If processing is taking too long, try lowering the resolution of your custom data.\n",
"* **Error: Data processing did not complete:**\n",
" * This means that the data processing script did not fully complete. This could be because there were not enough images, or that the images were of low quality. We recommend images with little to no motion blur and lots of visual overlap of the scene to increase the chances of successful processing.\n",
"* **Training is not showing progress**:\n",
" * The lack of output is a bug in Colab. You can see the training progress from the viewer.\n",
"* **Viewer Quality is bad / Low resolution**:\n",
" * This may be because more GPU is being used on training than on rendering the viewer. Try pausing training or decreasing training utilization.\n",
"* **WARNING: Running pip as the 'root' user...**:\n",
" * This and other pip warnings or errors can be safely ignored.\n",
"* **Other problems?**\n",
" * Feel free to create an issue on our [GitHub repo](https://github.com/nerfstudio-project/nerfstudio).\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"cellView": "form",
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "RGr33zHaHak0",
"outputId": "2641dce6-cd57-4d31-d8b1-860bc1adf17f"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"⏬ Downloading https://github.com/jaimergp/miniforge/releases/latest/download/Mambaforge-colab-Linux-x86_64.sh...\n",
"📦 Installing...\n",
"📌 Adjusting configuration...\n",
"🩹 Patching environment...\n",
"⏲ Done in 0:00:27\n",
"🔁 Restarting kernel...\n"
]
}
],
"source": [
"#@markdown <h1>Install Conda (requires runtime restart)</h1>\n",
"\n",
"!pip install -q condacolab\n",
"import condacolab\n",
"condacolab.install()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "9oyLHl8QfYwP"
},
"outputs": [],
"source": [
"#@markdown <h1>Install Nerfstudio and Dependencies (~10 min)</h1>\n",
"\n",
"%cd /content/\n",
"!pip install --upgrade pip\n",
"!pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 -f https://download.pytorch.org/whl/torch_stable.html\n",
"\n",
"# Installing TinyCuda\n",
"%cd /content/\n",
"!gdown \"https://drive.google.com/u/1/uc?id=1q8fuc-Mqiev5GTBTRA5UPgCaQDzuqKqj\" \n",
"!pip install tinycudann-1.6-cp37-cp37m-linux_x86_64.whl\n",
"\n",
"# Installing COLMAP\n",
"%cd /content/\n",
"!conda install -c conda-forge colmap\n",
"\n",
"# Install nerfstudio\n",
"%cd /content/\n",
"# !pip install nerfstudio\n",
"!pip install git+https://github.com/nerfstudio-project/nerfstudio.git"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "msVLprI4gRA4"
},
"outputs": [],
"source": [
"#@markdown <h1> Downloading and Processing Data</h1>\n",
"#@markdown <h3>Pick the preset scene or upload your own images/video</h3>\n",
"import os\n",
"import glob\n",
"from google.colab import files\n",
"from IPython.core.display import display, HTML\n",
"\n",
"scene = '\\uD83D\\uDDBC poster' #@param ['🖼 poster', '🚜 dozer', '🌄 desolation', '📤 upload your images' , '🎥 upload your own video', '🔺 upload Polycam data', '💽 upload your own Record3D data']\n",
"scene = ' '.join(scene.split(' ')[1:])\n",
"\n",
"if scene == \"upload Polycam data\":\n",
" %cd /content/\n",
" !mkdir -p /content/data/nerfstudio/custom_data\n",
" %cd /content/data/nerfstudio/custom_data/\n",
" uploaded = files.upload()\n",
" dir = os.getcwd()\n",
" if len(uploaded.keys()) > 1:\n",
" print(\"ERROR, upload a single .zip file when processing Polycam data\")\n",
" dataset_dir = [os.path.join(dir, f) for f in uploaded.keys()][0]\n",
" !ns-process-data polycam --data $dataset_dir --output-dir /content/data/nerfstudio/custom_data/\n",
" scene = \"custom_data\"\n",
"elif scene == 'upload your own Record3D data':\n",
" display(HTML('<h3>Zip your Record3D folder, and upload.</h3>'))\n",
" display(HTML('<h3>More information on Record3D can be found <a href=\"https://docs.nerf.studio/en/latest/quickstart/custom_dataset.html#record3d-capture\" target=\"_blank\">here</a>.</h3>'))\n",
" %cd /content/\n",
" !mkdir -p /content/data/nerfstudio/custom_data\n",
" %cd /content/data/nerfstudio/custom_data/\n",
" uploaded = files.upload()\n",
" dir = os.getcwd()\n",
" preupload_datasets = [os.path.join(dir, f) for f in uploaded.keys()]\n",
" record_3d_zipfile = preupload_datasets[0]\n",
" !unzip $record_3d_zipfile -d /content/data/nerfstudio/custom_data\n",
" custom_data_directory = glob.glob('/content/data/nerfstudio/custom_data/*')[0]\n",
" !ns-process-data record3d --data $custom_data_directory --output-dir /content/data/nerfstudio/custom_data/\n",
" scene = \"custom_data\"\n",
"elif scene in ['upload your images', 'upload your own video']:\n",
" display(HTML('<h3>Select your custom data</h3>'))\n",
" display(HTML('<p/>You can select multiple images by pressing ctrl, cmd or shift and click.<p>'))\n",
" display(HTML('<p/>Note: This may take time, especially on hires inputs, so we recommend to download dataset after creation.<p>'))\n",
" !mkdir -p /content/data/nerfstudio/custom_data\n",
" if scene == 'upload your images':\n",
" !mkdir -p /content/data/nerfstudio/custom_data/raw_images\n",
" %cd /content/data/nerfstudio/custom_data/raw_images\n",
" uploaded = files.upload()\n",
" dir = os.getcwd()\n",
" else:\n",
" %cd /content/data/nerfstudio/custom_data/\n",
" uploaded = files.upload()\n",
" dir = os.getcwd()\n",
" preupload_datasets = [os.path.join(dir, f) for f in uploaded.keys()]\n",
" del uploaded\n",
" %cd /content/\n",
"\n",
" if scene == 'upload your images':\n",
" !ns-process-data images --data /content/data/nerfstudio/custom_data/raw_images --output-dir /content/data/nerfstudio/custom_data/\n",
" else:\n",
" video_path = preupload_datasets[0]\n",
" !ns-process-data video --data $video_path --output-dir /content/data/nerfstudio/custom_data/\n",
"\n",
" scene = \"custom_data\"\n",
"else:\n",
" %cd /content/\n",
" !ns-download-data nerfstudio --capture-name=$scene\n",
"\n",
"print(\"Data Processing Succeeded!\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"cellView": "form",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 928
},
"id": "VoKDxqEcjmfC",
"outputId": "d2919aa4-96dd-4e50-829f-289e4208882b"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/content\n",
"\u001b[K\u001b[?25h/tools/node/bin/lt -> /tools/node/lib/node_modules/localtunnel/bin/lt.js\n",
"\u001b[K\u001b[?25h+ localtunnel@2.0.2\n",
"added 22 packages from 22 contributors in 2.07s\n",
"https://viewer.nerf.studio/?websocket_url=wss://cyan-facts-matter-34-91-1-218.loca.lt\n",
"You may need to click Refresh Page after you start training!\n"
]
},
{
"data": {
"text/html": [
"\n",
" <iframe\n",
" width=\"100%\"\n",
" height=\"800\"\n",
" src=\"https://viewer.nerf.studio/?websocket_url=wss://cyan-facts-matter-34-91-1-218.loca.lt\"\n",
" frameborder=\"0\"\n",
" allowfullscreen\n",
" ></iframe>\n",
" "
],
"text/plain": [
"<IPython.lib.display.IFrame at 0x7f1d0da6f950>"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#@markdown <h1>Set up and Start Viewer</h1>\n",
"\n",
"%cd /content\n",
"\n",
"# Install localtunnel\n",
"# We are using localtunnel https://github.com/localtunnel/localtunnel but ngrok could also be used\n",
"!npm install -g localtunnel\n",
"\n",
"# Tunnel port 7007, the default for\n",
"!rm url.txt 2> /dev/null\n",
"get_ipython().system_raw('lt --port 7007 >> url.txt 2>&1 &')\n",
"\n",
"import time\n",
"time.sleep(3) # the previous command needs time to write to url.txt\n",
"\n",
"\n",
"with open('url.txt') as f:\n",
" lines = f.readlines()\n",
"websocket_url = lines[0].split(\": \")[1].strip().replace(\"https\", \"wss\")\n",
"# from nerfstudio.utils.io import load_from_json\n",
"# from pathlib import Path\n",
"# json_filename = \"nerfstudio/nerfstudio/viewer/app/package.json\"\n",
"# version = load_from_json(Path(json_filename))[\"version\"]\n",
"url = f\"https://viewer.nerf.studio/?websocket_url={websocket_url}\"\n",
"print(url)\n",
"print(\"You may need to click Refresh Page after you start training!\")\n",
"from IPython import display\n",
"display.IFrame(src=url, height=800, width=\"100%\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "m_N8_cLfjoXD"
},
"outputs": [],
"source": [
"#@markdown <h1>Start Training</h1>\n",
"\n",
"%cd /content\n",
"if os.path.exists(f\"data/nerfstudio/{scene}/transforms.json\"):\n",
" !ns-train nerfacto --viewer.websocket-port 7007 nerfstudio-data --data data/nerfstudio/$scene --downscale-factor 4\n",
"else:\n",
" display(HTML('<h3 style=\"color:red\">Error: Data processing did not complete</h3>'))\n",
" display(HTML('<h3>Please re-run `Downloading and Processing Data`, or view the FAQ for more info.</h3>'))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "WGt8ukG6Htg3",
"outputId": "fa946890-c7d8-4e46-a54e-7231bc5a2059"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[2;36m[19:48:48]\u001b[0m\u001b[2;36m \u001b[0mSkipping \u001b[1;36m0\u001b[0m files in dataset split train. \u001b]8;id=527413;file:///content/nerfstudio/nerfstudio/data/dataparsers/nerfstudio_dataparser.py\u001b\\\u001b[2mnerfstudio_dataparser.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=243595;file:///content/nerfstudio/nerfstudio/data/dataparsers/nerfstudio_dataparser.py#91\u001b\\\u001b[2m91\u001b[0m\u001b]8;;\u001b\\\n",
"\u001b[2;36m \u001b[0m\u001b[2;36m \u001b[0mSkipping \u001b[1;36m0\u001b[0m files in dataset split test. \u001b]8;id=109270;file:///content/nerfstudio/nerfstudio/data/dataparsers/nerfstudio_dataparser.py\u001b\\\u001b[2mnerfstudio_dataparser.py\u001b[0m\u001b]8;;\u001b\\\u001b[2m:\u001b[0m\u001b]8;id=464675;file:///content/nerfstudio/nerfstudio/data/dataparsers/nerfstudio_dataparser.py#91\u001b\\\u001b[2m91\u001b[0m\u001b]8;;\u001b\\\n",
"\u001b[2KLoading data batch \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[35m100%\u001b[0m \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h/usr/local/lib/python3.7/site-packages/torch/utils/data/dataloader.py:566: UserWarning: This DataLoader will create 4 worker processes in total. Our suggested max number of worker in current system is 2, which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
" cpuset_checked))\n",
"\u001b[2KLoading data batch \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[35m100%\u001b[0m \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h/usr/local/lib/python3.7/site-packages/torchvision/models/_utils.py:209: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and will be removed in 0.15, please use 'weights' instead.\n",
" f\"The parameter '{pretrained_param}' is deprecated since 0.13 and will be removed in 0.15, \"\n",
"/usr/local/lib/python3.7/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and will be removed in 0.15. The current behavior is equivalent to passing `weights=AlexNet_Weights.IMAGENET1K_V1`. You can also use `weights=AlexNet_Weights.DEFAULT` to get the most up-to-date weights.\n",
" warnings.warn(msg)\n",
"Loading latest checkpoint from load_dir\n",
"✅ Done loading checkpoint from \n",
"outputs/data-nerfstudio-poster/nerfacto/\u001b[1;36m2022\u001b[0m-\u001b[1;36m10\u001b[0m-29_192844/nerfstudio_models/step-\u001b[1;36m000014000.\u001b[0mckpt\n",
"\u001b[1;32mCreating trajectory video\u001b[0m\n",
"\u001b[2K🎥 Rendering 🎥 \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[35m100%\u001b[0m \u001b[31m0.14 fps\u001b[0m \u001b[33m11:47\u001b[0m\n",
"\u001b[2K\u001b[32m( ● )\u001b[0m \u001b[33mSaving video\u001b[0m\n",
"\u001b[1A\u001b[2K\u001b[92m────────────────────────────────────────────── \u001b[0m\u001b[32m 🎉 🎉 🎉 Success 🎉 🎉 🎉\u001b[0m\u001b[92m ──────────────────────────────────────────────\u001b[0m\n",
" \u001b[32mSaved video to renders/output.mp4\u001b[0m \n",
"\u001b[0m"
]
}
],
"source": [
"#@title # Render Video { vertical-output: true }\n",
"#@markdown <h3>Export the camera path from within the viewer, then run this cell.</h3>\n",
"#@markdown <h5>The rendered video should be at renders/output.mp4!</h5>\n",
"\n",
"\n",
"base_dir = \"/content/outputs/data-nerfstudio-\" + scene + \"/nerfacto/\"\n",
"training_run_dir = base_dir + os.listdir(base_dir)[0]\n",
"\n",
"from IPython.core.display import display, HTML\n",
"display(HTML('<h3>Upload the camera path JSON.</h3>'))\n",
"%cd $training_run_dir\n",
"uploaded = files.upload()\n",
"uploaded_camera_path_filename = list(uploaded.keys())[0]\n",
"\n",
"config_filename = training_run_dir + \"/config.yml\"\n",
"camera_path_filename = training_run_dir + \"/\" + uploaded_camera_path_filename\n",
"camera_path_filename = camera_path_filename.replace(\" \", \"\\\\ \").replace(\"(\", \"\\\\(\").replace(\")\", \"\\\\)\")\n",
"\n",
"%cd /content/\n",
"!ns-render --load-config $config_filename --traj filename --camera-path-filename $camera_path_filename --output-path renders/output.mp4"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"collapsed_sections": [],
"include_colab_link": true,
"provenance": []
},
"gpuClass": "standard",
"kernelspec": {
"display_name": "Python 3.8.13 ('nerfstudio')",
"language": "python",
"name": "python3"
},
"language_info": {
"name": "python",
"version": "3.8.13"
},
"vscode": {
"interpreter": {
"hash": "c59f626636933ef1dc834fb3684b382f705301c5306cf8436d2da634c2289783"
}
}
},
"nbformat": 4,
"nbformat_minor": 0
}
================================================
FILE: AutoReconForDens3R/docs/INSTALL.md
================================================
# Installation
## Create a conda environment
```bash
conda create --name auto_recon -y python=3.9
conda activate auto_recon
python -m pip install --upgrade pip setuptools
```
## Clone the repo
```shell
git clone --recurse-submodules git@github.com:zju3dv/AutoRecon.git
```
## Install AutoDecomp
```bash
cd third_party/AutoDecomp
```
Then, please install AutoDecomp based on its [installation guide](https://github.com/zju3dv/AutoDecomp/blob/main/docs/INSTALL.md).
We expect that it is installed in `third_party/AutoDecomp`.
## Install other dependencies
First install PyTorch with CUDA support (this repo has been tested with CUDA 11.7), then install [tiny-cuda-nn](https://github.com/NVlabs/tiny-cuda-nn):
```bash
pip install git+https://github.com/NVlabs/tiny-cuda-nn/#subdirectory=bindings/torch
```
Install faiss following the [official guide](https://github.com/facebookresearch/faiss/blob/main/INSTALL.md)
```bash
# for example
conda install -c conda-forge faiss-gpu
```
## Install AutoRecon
```bash
cd path/to/AutoRecon
pip install -e .
# install tab completion
ns-install-cli
```
================================================
FILE: AutoReconForDens3R/exps/code-release/bmvs/scan1.sh
================================================
# BlendedMVS scan1: AutoDecomp coarse decomposition followed by AutoRecon
# NeuS-facto reconstruction. Run from the repository root.
scan_name="scan1"
DATA_ROOT=data
INST_REL_DIR=BlendedMVS/$scan_name
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr_idr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# Reconstruction
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_bmvs-${scan_name}_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_pairs-from-poses_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Optional: extract a mesh from the trained model (set LOG_DIR to the run dir).
# LOG_DIR="outputs/<path_to_log_dir>"
# MC_RES=512 # 1024 (default)
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components True \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/bmvs/scan2.sh
================================================
# BlendedMVS scan2: AutoDecomp coarse decomposition followed by AutoRecon
# NeuS-facto reconstruction. Run from the repository root.
scan_name="scan2"
DATA_ROOT=data
INST_REL_DIR=BlendedMVS/$scan_name
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr_idr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# Reconstruction
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_bmvs-${scan_name}_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_pairs-from-poses_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Optional: extract a mesh from the trained model (set LOG_DIR to the run dir).
# LOG_DIR="outputs/<path_to_log_dir>"
# MC_RES=512 # 1024 (default)
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components False \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/bmvs/scan3.sh
================================================
# BlendedMVS scan3: AutoDecomp coarse decomposition followed by AutoRecon
# NeuS-facto reconstruction. Run from the repository root.
scan_name="scan3"
DATA_ROOT=data
INST_REL_DIR=BlendedMVS/$scan_name
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr_idr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# Reconstruction
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_bmvs-${scan_name}_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_pairs-from-poses_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Optional: extract a mesh from the trained model (set LOG_DIR to the run dir).
# LOG_DIR="outputs/<path_to_log_dir>"
# MC_RES=512 # 1024 (default)
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components False \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/bmvs/scan4.sh
================================================
# BlendedMVS scan4: AutoDecomp coarse decomposition followed by AutoRecon
# NeuS-facto reconstruction. Run from the repository root.
scan_name="scan4"
DATA_ROOT=data
INST_REL_DIR=BlendedMVS/$scan_name
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr_idr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# Reconstruction
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_bmvs-${scan_name}_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_pairs-from-poses_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Optional: extract a mesh from the trained model (set LOG_DIR to the run dir).
# LOG_DIR="outputs/<path_to_log_dir>"
# MC_RES=512 # 1024 (default)
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components False \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/bmvs/scan5.sh
================================================
# BlendedMVS scan5: AutoDecomp coarse decomposition followed by AutoRecon
# NeuS-facto reconstruction. Run from the repository root.
scan_name="scan5"
DATA_ROOT=data
INST_REL_DIR=BlendedMVS/$scan_name
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr_idr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# Reconstruction
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_bmvs-${scan_name}_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_pairs-from-poses_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Optional: extract a mesh from the trained model (set LOG_DIR to the run dir).
# LOG_DIR="outputs/<path_to_log_dir>"
# MC_RES=512 # 1024 (default)
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components False \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/bmvs/scan6.sh
================================================
# BlendedMVS scan6: AutoDecomp coarse decomposition followed by AutoRecon
# NeuS-facto reconstruction. Run from the repository root.
scan_name="scan6"
DATA_ROOT=data
INST_REL_DIR=BlendedMVS/$scan_name
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr_idr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# Reconstruction
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_bmvs-${scan_name}_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_pairs-from-poses_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Optional: extract a mesh from the trained model (set LOG_DIR to the run dir).
# LOG_DIR="outputs/<path_to_log_dir>"
# MC_RES=512 # 1024 (default)
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components False \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/co3d_demo/scan1.sh
================================================
# CO3D demo scan1: AutoDecomp coarse decomposition (including SfM sparse
# reconstruction) followed by AutoRecon NeuS-facto reconstruction.
# Run from the repository root.
scan_name=scan1
DATA_ROOT=data
INST_REL_DIR=CO3D_DEMO/$scan_name
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
sparse_recon.n_images=40 \
sparse_recon.force_rerun=$FORCE_RERUN \
sparse_recon.n_feature_workers=1 sparse_recon.n_recon_workers=1 \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# Reconstruction
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_co3d-${scan_name}_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_sequential_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Optional: extract a mesh from the trained model (set LOG_DIR to the run dir).
# LOG_DIR="outputs/<path_to_log_dir>"
# MC_RES=512 # 1024 (default)
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components True \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/co3d_demo/scan2.sh
================================================
# CO3D demo scan2: AutoDecomp coarse decomposition (including SfM sparse
# reconstruction) followed by AutoRecon NeuS-facto reconstruction.
# Run from the repository root.
scan_name=scan2
DATA_ROOT=data
INST_REL_DIR=CO3D_DEMO/$scan_name
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
sparse_recon.n_images=40 \
sparse_recon.force_rerun=$FORCE_RERUN \
sparse_recon.n_feature_workers=1 sparse_recon.n_recon_workers=1 \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# Reconstruction
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_co3d-${scan_name}_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_sequential_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Optional: extract a mesh from the trained model (set LOG_DIR to the run dir).
# LOG_DIR="outputs/<path_to_log_dir>"
# MC_RES=512 # 1024 (default)
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components True \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/co3d_demo/scan3.sh
================================================
# CO3D demo scan3: AutoDecomp coarse decomposition (including SfM sparse
# reconstruction) followed by AutoRecon NeuS-facto reconstruction.
# Run from the repository root.
scan_name=scan3
DATA_ROOT=data
INST_REL_DIR=CO3D_DEMO/$scan_name
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
sparse_recon.n_images=40 \
sparse_recon.force_rerun=$FORCE_RERUN \
sparse_recon.n_feature_workers=1 sparse_recon.n_recon_workers=1 \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# Reconstruction
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_co3d-${scan_name}_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_sequential_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Optional: extract a mesh from the trained model (set LOG_DIR to the run dir).
# LOG_DIR="outputs/<path_to_log_dir>"
# MC_RES=512 # 1024 (default)
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components True \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/co3d_demo/scan4.sh
================================================
# CO3D demo scan4: AutoDecomp coarse decomposition (including SfM sparse
# reconstruction) followed by AutoRecon NeuS-facto reconstruction.
# Run from the repository root.
scan_name=scan4
DATA_ROOT=data
INST_REL_DIR=CO3D_DEMO/$scan_name
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
sparse_recon.n_images=40 \
sparse_recon.force_rerun=$FORCE_RERUN \
sparse_recon.n_feature_workers=1 sparse_recon.n_recon_workers=1 \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# Reconstruction
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_co3d-${scan_name}_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_sequential_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Optional: extract a mesh from the trained model (set LOG_DIR to the run dir).
# LOG_DIR="outputs/<path_to_log_dir>"
# MC_RES=512 # 1024 (default)
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components True \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/co3d_demo/scan5.sh
================================================
# CO3D demo scan5: AutoDecomp coarse decomposition (including SfM sparse
# reconstruction) followed by AutoRecon NeuS-facto reconstruction.
# Run from the repository root.
scan_name=scan5
DATA_ROOT=data
INST_REL_DIR=CO3D_DEMO/$scan_name
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
sparse_recon.n_images=40 \
sparse_recon.force_rerun=$FORCE_RERUN \
sparse_recon.n_feature_workers=1 sparse_recon.n_recon_workers=1 \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# Reconstruction
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_co3d-${scan_name}_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_sequential_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Optional: extract a mesh from the trained model (set LOG_DIR to the run dir).
# LOG_DIR="outputs/<path_to_log_dir>"
# MC_RES=512 # 1024 (default)
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components True \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/run_dens3r_recon.sh
================================================
#!/bin/bash
# Dens3R-based reconstruction pipeline:
#   1) AutoDecomp coarse decomposition with Dens3R as the SfM backend,
#   2) AutoRecon NeuS-facto object reconstruction,
#   3) marching-cubes mesh extraction.
# All three stages ship commented out: fill in the paths below, then
# uncomment the stage(s) you want to run (from the repository root).
# NOTE: the original first line was "#/bin/bash", which is an ordinary
# comment, not a shebang — fixed to "#!/bin/bash" so direct execution
# uses bash rather than the caller's default shell.
DATA_ROOT=/path/to/your/data
INST_REL_DIR=rel_path/to/your/data
DENS3R_MODEL_PATH=/path/to/pth/file/of/model
N_IMAGES=40
NEUS_TRAIN_RESO_DOWN_FACTOR=0.5
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
EXP_NAME=neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_demo_cvpr
NERF_MODE=neus-facto-wbg-reg_sep-plane-nerf
# reconstruct scene with dens3r
# python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py \
# --config-name=cvpr \
# data_root=$DATA_ROOT \
# inst_rel_dir=$INST_REL_DIR \
# sparse_recon.n_images=$N_IMAGES \
# sparse_recon.force_rerun=$FORCE_RERUN \
# sparse_recon.n_feature_workers=1 sparse_recon.n_recon_workers=1 \
# triangulation.force_rerun=$FORCE_RERUN \
# triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
# dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1 \
# sfm_mode=dens3r \
# sparse_recon.dens3r_model_ckpt=$DENS3R_MODEL_PATH
# reconstruct object with AutoRecon
# python scripts/train.py \
# $NERF_MODE \
# --experiment-name $EXP_NAME \
# --vis tensorboard \
# --trainer.steps_per_eval_image 2500 \
# --trainer.steps_per_eval_batch 2500 \
# --trainer.max_num_iterations 60001 \
# --trainer.steps_per_save 60000 \
# --pipeline.model.cos_anneal_end 10000 \
# --pipeline.model.plane_height_ratio 0.3 \
# --optimizers.fields.scheduler.max_steps 60001 \
# --pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
# --pipeline.datamanager.camera_res_scale_factor $NEUS_TRAIN_RESO_DOWN_FACTOR \
# --pipeline.model.sdf_field.hash_grid_progressive_training False \
# autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
# --anno_dirname 'dens3r_40_sequential/auto-deocomp_sfm-transformer_cvpr' \
# --camera_filename cameras_cameras_norm-obj-side-2.0.npz \
# --object_filename objects_cameras_norm-obj-side-2.0.npz \
# --parse_images_from_camera_dict True \
# --image_dirname "images" --image_extension ".jpg" \
# --include_image_features False --include_coarse_features False \
# --use_accurate_scene_box True \
# --collider_type 'box_near_far' \
# --near_far 0.1 100.0 \
# --compute_fg_bbox_mask True \
# --force_recompute_fg_bbox_mask False \
# --decomposition_mode regularization \
# --downsample_ptcd -1
# Extract mesh with MC
# LOG_DIR="outputs/$EXP_NAME/$NERF_MODE/data" # default is outputs/$EXP_NAME/$NERF_MODE/date
# MC_RES=512
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/sdfstudio_models/${MESH_FN}"
# python scripts/extract_mesh.py \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components True \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/exps/code-release/run_pipeline_demo_low-res.sh
================================================
# Low-resolution demo pipeline on the custom CO3D chair example:
# AutoDecomp coarse decomposition followed by AutoRecon NeuS-facto
# reconstruction. Run from the repository root.
DATA_ROOT=data
INST_REL_DIR=custom_data_example/co3d_chair
# Re-run cached pipeline stages even if previous outputs exist.
FORCE_RERUN=True
# Coarse decomposition
python third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py --config-name=cvpr \
data_root=$DATA_ROOT \
inst_rel_dir=$INST_REL_DIR \
sparse_recon.n_images=40 \
sparse_recon.force_rerun=$FORCE_RERUN \
sparse_recon.n_feature_workers=1 sparse_recon.n_recon_workers=1 \
triangulation.force_rerun=$FORCE_RERUN \
triangulation.n_feature_workers=1 triangulation.n_recon_workers=1 \
dino_feature.force_extract=$FORCE_RERUN dino_feature.n_workers=1
# reconstruction
# TODO: parse anno_dirname & object_filename from cache
ns-train neus-facto-wbg-reg_sep-plane-nerf \
--experiment-name neusfacto-wbg-reg_sep-plane-nerf_60k_plane-h-ratio-0.3_demo_cvpr \
--vis tensorboard \
--trainer.steps_per_eval_image 2500 \
--trainer.steps_per_eval_batch 2500 \
--trainer.max_num_iterations 60001 \
--trainer.steps_per_save 60000 \
--pipeline.model.cos_anneal_end 10000 \
--pipeline.model.plane_height_ratio 0.3 \
--optimizers.fields.scheduler.max_steps 60001 \
--pipeline.datamanager.eval_camera_res_scale_factor 0.25 \
--pipeline.model.sdf_field.hash_grid_progressive_training False \
autorecon-data --data $DATA_ROOT/$INST_REL_DIR \
--anno_dirname 'triangulate_loftr-720000_sequential_np-10/auto-deocomp_sfm-transformer_cvpr' \
--camera_filename cameras_cameras_norm-obj-side-2.0.npz \
--object_filename objects_cameras_norm-obj-side-2.0.npz \
--parse_images_from_camera_dict True \
--image_dirname "images" --image_extension ".jpg" \
--include_image_features False --include_coarse_features False \
--use_accurate_scene_box True \
--collider_type 'box_near_far' \
--near_far 0.1 100.0 \
--compute_fg_bbox_mask True \
--force_recompute_fg_bbox_mask False \
--decomposition_mode regularization \
--downsample_ptcd -1
# Extract mesh with MC
# LOG_DIR="path_to_log_dir"
# MC_RES=512
# MESH_FN="extracted_mesh_res-${MC_RES}.ply"
# MESH_PATH="${LOG_DIR}/${MESH_FN}"
# ns-extract-mesh \
# --load-config $LOG_DIR/config.yml \
# --load-dir $LOG_DIR/sdfstudio_models \
# --output-path $MESH_PATH \
# --chunk_size 25000 --store_float16 True \
# --resolution $MC_RES \
# --use_train_scene_box True \
# --seg_aware_sdf False \
# --remove_internal_geometry None \
# --remove_non_maximum_connected_components True \
# --close_holes False --simplify_mesh_final False
================================================
FILE: AutoReconForDens3R/nerfstudio/__init__.py
================================================
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: AutoReconForDens3R/nerfstudio/cameras/__init__.py
================================================
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: AutoReconForDens3R/nerfstudio/cameras/camera_optimizers.py
================================================
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pose and Intrinsics Optimizers
"""
from __future__ import annotations
import functools
from dataclasses import dataclass, field
from typing import Type, Union
import torch
import tyro
from torch import nn
from torchtyping import TensorType
from typing_extensions import Literal, assert_never
from nerfstudio.cameras.lie_groups import exp_map_SE3, exp_map_SO3xR3
from nerfstudio.configs import base_config as cfg
from nerfstudio.engine.optimizers import AdamOptimizerConfig
from nerfstudio.engine.schedulers import SchedulerConfig
from nerfstudio.utils import poses as pose_utils
@dataclass
class CameraOptimizerConfig(cfg.InstantiateConfig):
    """Configuration of optimization for camera poses."""

    _target: Type = field(default_factory=lambda: CameraOptimizer)

    mode: Literal["off", "SO3xR3", "SE3"] = "off"
    """Pose optimization strategy to use. If enabled, we recommend SO3xR3."""

    position_noise_std: float = 0.0
    """Noise to add to initial positions. Useful for debugging."""

    orientation_noise_std: float = 0.0
    """Noise to add to initial orientations. Useful for debugging."""

    # NOTE: default_factory is used so that every config instance gets its own
    # optimizer/scheduler config object. A plain class-level default instance would
    # be shared by all CameraOptimizerConfig objects, so mutating one (e.g. tuning
    # the lr of a single experiment) would silently mutate them all.
    optimizer: AdamOptimizerConfig = field(default_factory=lambda: AdamOptimizerConfig(lr=6e-4, eps=1e-15))
    """ADAM parameters for camera optimization."""

    scheduler: SchedulerConfig = field(default_factory=lambda: SchedulerConfig(max_steps=10000))
    """Learning rate scheduler for camera optimizer."""

    param_group: tyro.conf.Suppress[str] = "camera_opt"
    """Name of the parameter group used for pose optimization. Can be any string that doesn't conflict with other
    groups."""
class CameraOptimizer(nn.Module):
    """Layer that modifies camera poses to be optimized as well as the field during training.

    When enabled, holds one learnable 6-vector tangent-space correction per camera,
    and optionally a fixed random SE(3) perturbation of the initial poses for debugging.
    """

    config: CameraOptimizerConfig

    def __init__(
        self,
        config: CameraOptimizerConfig,
        num_cameras: int,
        device: Union[torch.device, str],
        **kwargs,  # pylint: disable=unused-argument
    ) -> None:
        """
        Args:
            config: configuration selecting the optimization mode and debug noise.
            num_cameras: number of cameras to hold pose corrections for.
            device: device on which the learnable parameters and noise are allocated.
        """
        super().__init__()
        self.config = config
        self.num_cameras = num_cameras
        self.device = device
        # Initialize learnable parameters.
        if self.config.mode == "off":
            pass
        elif self.config.mode in ("SO3xR3", "SE3"):
            # One 6-DoF tangent-space vector per camera, initialized to the identity
            # transform (all zeros). Mapped through exp_map_* in forward().
            self.pose_adjustment = torch.nn.Parameter(torch.zeros((num_cameras, 6), device=device))
        else:
            assert_never(self.config.mode)
        # Initialize pose noise; useful for debugging.
        if config.position_noise_std != 0.0 or config.orientation_noise_std != 0.0:
            assert config.position_noise_std >= 0.0 and config.orientation_noise_std >= 0.0
            # First 3 components are translation noise, last 3 are rotation noise.
            std_vector = torch.tensor(
                [config.position_noise_std] * 3 + [config.orientation_noise_std] * 3, device=device
            )
            # Fixed (non-learnable) random SE(3) perturbation, one per camera.
            self.pose_noise = exp_map_SE3(torch.normal(torch.zeros((num_cameras, 6), device=device), std_vector))
        else:
            self.pose_noise = None

    def forward(
        self,
        indices: TensorType["num_cameras"],
    ) -> TensorType["num_cameras", 3, 4]:
        """Indexing into camera adjustments.

        Args:
            indices: indices of Cameras to optimize.

        Returns:
            Transformation matrices from optimized camera coordinates
            to given camera coordinates.
        """
        outputs = []
        # Apply learned transformation delta.
        if self.config.mode == "off":
            pass
        elif self.config.mode == "SO3xR3":
            outputs.append(exp_map_SO3xR3(self.pose_adjustment[indices, :]))
        elif self.config.mode == "SE3":
            outputs.append(exp_map_SE3(self.pose_adjustment[indices, :]))
        else:
            assert_never(self.config.mode)
        # Apply initial pose noise.
        if self.pose_noise is not None:
            outputs.append(self.pose_noise[indices, :, :])
        # Return: identity if no transforms are needed, otherwise multiply transforms together.
        if len(outputs) == 0:
            # Note that using repeat() instead of tile() here would result in unnecessary copies.
            return torch.eye(4, device=self.device)[None, :3, :4].tile(indices.shape[0], 1, 1)
        return functools.reduce(pose_utils.multiply, outputs)
================================================
FILE: AutoReconForDens3R/nerfstudio/cameras/camera_paths.py
================================================
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Code for camera paths.
"""
from typing import Any, Dict, Optional, Tuple
import torch
import nerfstudio.utils.poses as pose_utils
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.camera_utils import get_interpolated_poses_many
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.viewer.server.utils import three_js_perspective_camera_focal_length
def get_interpolated_camera_path(cameras: Cameras, steps: int) -> Cameras:
    """Generate a camera path interpolated between consecutive cameras.

    Args:
        cameras: Cameras object containing intrinsics and poses of all cameras.
        steps: The number of steps to interpolate between each pair of cameras.

    Returns:
        A new set of cameras along a path.
    """
    Ks = cameras.get_intrinsics_matrices().cpu().numpy()
    # BUG FIX: `camera_to_worlds` is a tensor attribute on Cameras, not a method;
    # the previous `cameras.camera_to_worlds()` raised a TypeError.
    poses = cameras.camera_to_worlds.cpu().numpy()
    poses, Ks = get_interpolated_poses_many(poses, Ks, steps_per_transition=steps)

    cameras = Cameras(fx=Ks[:, 0, 0], fy=Ks[:, 1, 1], cx=Ks[0, 0, 2], cy=Ks[0, 1, 2], camera_to_worlds=poses)
    return cameras
def get_spiral_path(
    camera: Cameras,
    steps: int = 30,
    radius: Optional[float] = None,
    radiuses: Optional[Tuple[float]] = None,
    rots: int = 2,
    zrate: float = 0.5,
) -> Cameras:
    """
    Returns a list of cameras in a spiral trajectory around a reference camera.

    Args:
        camera: The camera to start the spiral from.
        steps: The number of cameras in the generated path.
        radius: The radius of the spiral for all xyz directions.
        radiuses: The list of radii for the spiral in xyz directions.
        rots: The number of rotations to apply to the camera.
        zrate: How much to change the z position of the camera.

    Returns:
        A spiral camera path.
    """
    # Exactly one of radius / radiuses must be given.
    assert radius is not None or radiuses is not None, "Either radius or radiuses must be specified."
    assert camera.ndim == 1, "We assume only one batch dim here"
    if radius is not None and radiuses is None:
        rad = torch.tensor([radius] * 3, device=camera.device)
    elif radiuses is not None and radius is None:
        rad = torch.tensor(radiuses, device=camera.device)
    else:
        raise ValueError("Only one of radius or radiuses must be specified.")

    camera = camera.flatten()

    up = camera.camera_to_worlds[0, :3, 2]  # scene is z up
    focal = torch.min(camera.fx[0], camera.fy[0])
    target = torch.tensor([0, 0, -focal], device=camera.device)  # camera looking in -z direction

    c2w = camera.camera_to_worlds[0]
    # Reference pose, promoted to a homogeneous 4x4 so local poses can be composed onto it.
    c2wh_global = pose_utils.to4x4(c2w)

    local_c2whs = []
    for theta in torch.linspace(0.0, 2.0 * torch.pi * rots, steps + 1)[:-1]:
        # Spiral position in the local frame of the reference camera; z oscillates with zrate.
        center = (
            torch.tensor([torch.cos(theta), -torch.sin(theta), -torch.sin(theta * zrate)], device=camera.device) * rad
        )
        lookat = center - target
        c2w = camera_utils.viewmatrix(lookat, up, center)
        c2wh = pose_utils.to4x4(c2w)
        local_c2whs.append(c2wh)

    # Compose every local spiral pose with the reference pose to get world-space poses.
    new_c2ws = []
    for local_c2wh in local_c2whs:
        c2wh = torch.matmul(c2wh_global, local_c2wh)
        new_c2ws.append(c2wh[:3, :4])
    new_c2ws = torch.stack(new_c2ws, dim=0)

    # All spiral cameras share the reference camera's intrinsics.
    return Cameras(
        fx=camera.fx[0],
        fy=camera.fy[0],
        cx=camera.cx[0],
        cy=camera.cy[0],
        camera_to_worlds=new_c2ws,
    )
def get_path_from_json(camera_path: Dict[str, Any]) -> Cameras:
    """Takes a camera path dictionary and returns a trajectory as a Camera instance.

    Args:
        camera_path: A dictionary of the camera path information coming from the viewer.

    Returns:
        A Cameras instance with the camera path.
    """
    render_height = camera_path["render_height"]
    render_width = camera_path["render_width"]

    poses = []
    focal_lengths = []
    for cam in camera_path["camera_path"]:
        # Row-major 4x4 pose from the viewer; keep only the top 3x4 [R | t] part.
        poses.append(torch.tensor(cam["camera_to_world"]).view(4, 4)[:3])
        # Convert the viewer's vertical field of view into a focal length in pixels.
        focal_lengths.append(three_js_perspective_camera_focal_length(cam["fov"], render_height))

    return Cameras(
        fx=torch.tensor(focal_lengths),
        fy=torch.tensor(focal_lengths),
        cx=render_width / 2,
        cy=render_height / 2,
        camera_to_worlds=torch.stack(poses, dim=0),
    )
def get_path_from_npz(camera_path: Dict[str, Any]) -> Cameras:
    """Build a render trajectory from a camera-path dict loaded from a .npz file.

    Args:
        camera_path: mapping with keys "height", "width", "K33" (a single 3x3
            intrinsics matrix shared by all frames) and "T44_c2w" (n 4x4
            camera-to-world poses).

    Returns:
        A Cameras instance with one camera per pose, all sharing the same intrinsics.
    """
    image_height, image_width = int(camera_path["height"]), int(camera_path["width"])
    K33_shared = torch.tensor(camera_path['K33'], dtype=torch.float32)
    T44_c2w_all = torch.tensor(camera_path['T44_c2w'], dtype=torch.float32)  # (n, 4, 4)
    # Replicate the shared intrinsics for every pose so per-camera fx/fy/cx/cy can be sliced out.
    K33 = K33_shared[None].repeat(T44_c2w_all.shape[0], 1, 1)
    return Cameras(
        T44_c2w_all[:, :3],
        fx=K33[:, 0, 0], fy=K33[:, 1, 1], cx=K33[:, 0, 2], cy=K33[:, 1, 2],
        width=image_width, height=image_height,
    )
================================================
FILE: AutoReconForDens3R/nerfstudio/cameras/camera_utils.py
================================================
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Camera transformation helper code.
"""
import math
from typing import List, Optional, Tuple
import numpy as np
import torch
from torchtyping import TensorType
from typing_extensions import Literal
_EPS = np.finfo(float).eps * 4.0
def unit_vector(data, axis: Optional[int] = None) -> np.ndarray:
    """Return *data* normalized to unit Euclidean length along *axis*.

    Args:
        data: array-like input; copied into a float64 ndarray before normalizing.
        axis: the axis along which to normalize. Ignored for 1-D input, which is
            normalized by its plain Euclidean norm.
    """
    arr = np.array(data, dtype=np.float64, copy=True)
    # Fast path: a single vector.
    if arr.ndim == 1:
        return arr / math.sqrt(np.dot(arr, arr))
    norms = np.sqrt(np.atleast_1d(np.sum(arr * arr, axis)))
    if axis is not None:
        # Keep the reduced axis so the division broadcasts back over the input.
        norms = np.expand_dims(norms, axis)
    return arr / norms
def quaternion_from_matrix(matrix, isprecise: bool = False) -> np.ndarray:
    """Return quaternion (w, x, y, z) from rotation matrix.

    Args:
        matrix: rotation matrix to obtain quaternion
        isprecise: if True, input matrix is assumed to be a precise rotation matrix
            and a faster (Shepperd-style branch) algorithm is used; otherwise the
            quaternion is recovered as an eigenvector of a symmetric 4x4 matrix,
            which is robust to slightly non-orthonormal input.
    """
    M = np.array(matrix, dtype=np.float64, copy=False)[:4, :4]
    if isprecise:
        q = np.empty((4,))
        t = np.trace(M)
        if t > M[3, 3]:
            # Trace-dominant case: w is the largest component.
            q[0] = t
            q[3] = M[1, 0] - M[0, 1]
            q[2] = M[0, 2] - M[2, 0]
            q[1] = M[2, 1] - M[1, 2]
        else:
            # Pick the largest diagonal element to avoid catastrophic cancellation.
            i, j, k = 1, 2, 3
            if M[1, 1] > M[0, 0]:
                i, j, k = 2, 3, 1
            if M[2, 2] > M[i, i]:
                i, j, k = 3, 1, 2
            t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
            q[i] = t
            q[j] = M[i, j] + M[j, i]
            q[k] = M[k, i] + M[i, k]
            q[3] = M[k, j] - M[j, k]
        q *= 0.5 / math.sqrt(t * M[3, 3])
    else:
        m00 = M[0, 0]
        m01 = M[0, 1]
        m02 = M[0, 2]
        m10 = M[1, 0]
        m11 = M[1, 1]
        m12 = M[1, 2]
        m20 = M[2, 0]
        m21 = M[2, 1]
        m22 = M[2, 2]
        # symmetric matrix K
        K = np.array(
            [
                [m00 - m11 - m22, 0.0, 0.0, 0.0],
                [m01 + m10, m11 - m00 - m22, 0.0, 0.0],
                [m02 + m20, m12 + m21, m22 - m00 - m11, 0.0],
                [m21 - m12, m02 - m20, m10 - m01, m00 + m11 + m22],
            ]
        )
        K /= 3.0
        # quaternion is eigenvector of K that corresponds to largest eigenvalue
        w, V = np.linalg.eigh(K)
        # Reorder from (x, y, z, w) eigenvector layout to (w, x, y, z).
        q = V[np.array([3, 0, 1, 2]), np.argmax(w)]
    # Canonicalize sign: q and -q encode the same rotation; pick w >= 0.
    if q[0] < 0.0:
        np.negative(q, q)
    return q
def quaternion_slerp(quat0, quat1, fraction: float, spin: int = 0, shortestpath: bool = True) -> np.ndarray:
    """Return spherical linear interpolation between two quaternions.

    Args:
        quat0: first quaternion
        quat1: second quaternion
        fraction: how much to interpolate between quat0 vs quat1 (if 0, closer to quat0; if 1, closer to quat1)
        spin: how much of an additional spin to place on the interpolation
        shortestpath: whether to return the short or long path to rotation
    """
    # Work on normalized copies; unit_vector returns fresh float64 arrays.
    q0 = unit_vector(quat0[:4])
    q1 = unit_vector(quat1[:4])
    if q0 is None or q1 is None:
        raise ValueError("Input quaternions invalid.")
    # Endpoint shortcuts avoid the trig below.
    if fraction == 0.0:
        return q0
    if fraction == 1.0:
        return q1
    d = np.dot(q0, q1)
    # Nearly parallel (or anti-parallel) quaternions: slerp degenerates; return q0.
    if abs(abs(d) - 1.0) < _EPS:
        return q0
    if shortestpath and d < 0.0:
        # invert rotation (q1 and -q1 encode the same rotation; flip in place to
        # take the shorter great-circle arc)
        d = -d
        np.negative(q1, q1)
    angle = math.acos(d) + spin * math.pi
    if abs(angle) < _EPS:
        return q0
    # Standard slerp: weighted sum of the endpoints on the unit 4-sphere.
    isin = 1.0 / math.sin(angle)
    q0 *= math.sin((1.0 - fraction) * angle) * isin
    q1 *= math.sin(fraction * angle) * isin
    q0 += q1
    return q0
def quaternion_matrix(quaternion) -> np.ndarray:
    """Return homogeneous rotation matrix from a (w, x, y, z) quaternion.

    Args:
        quaternion: value to convert to matrix; need not be normalized.
    """
    q = np.array(quaternion, dtype=np.float64, copy=True)
    norm_sq = np.dot(q, q)
    # A (near-)zero quaternion carries no usable rotation; fall back to identity.
    if norm_sq < np.finfo(float).eps * 4.0:
        return np.identity(4)
    q *= math.sqrt(2.0 / norm_sq)
    # With this scaling, o[i, j] == 2 * q_i * q_j / |q|^2, the standard
    # quaternion-to-matrix building blocks.
    o = np.outer(q, q)
    mat = np.identity(4)
    mat[0, 0] = 1.0 - o[2, 2] - o[3, 3]
    mat[0, 1] = o[1, 2] - o[3, 0]
    mat[0, 2] = o[1, 3] + o[2, 0]
    mat[1, 0] = o[1, 2] + o[3, 0]
    mat[1, 1] = 1.0 - o[1, 1] - o[3, 3]
    mat[1, 2] = o[2, 3] - o[1, 0]
    mat[2, 0] = o[1, 3] - o[2, 0]
    mat[2, 1] = o[2, 3] + o[1, 0]
    mat[2, 2] = 1.0 - o[1, 1] - o[2, 2]
    return mat
def get_interpolated_poses(pose_a, pose_b, steps: int = 10) -> List[float]:
    """Return interpolation of poses with specified number of steps.

    Rotations are slerped between the two orientations; translations are
    linearly interpolated.

    Args:
        pose_a: first pose
        pose_b: second pose
        steps: number of steps the interpolated pose path should contain
    """
    quat_a = quaternion_from_matrix(pose_a[:3, :3])
    quat_b = quaternion_from_matrix(pose_b[:3, :3])

    interpolated = []
    for t in np.linspace(0, 1, steps):
        pose = np.identity(4)
        # Rotation via quaternion slerp, translation via lerp.
        pose[:3, :3] = quaternion_matrix(quaternion_slerp(quat_a, quat_b, t))[:3, :3]
        pose[:3, 3] = (1 - t) * pose_a[:3, 3] + t * pose_b[:3, 3]
        interpolated.append(pose)
    return interpolated
def get_interpolated_k(k_a, k_b, steps: int = 10) -> List[np.ndarray]:
    """Linearly interpolate between two camera intrinsic matrices.

    Args:
        k_a: first camera matrix
        k_b: second camera matrix
        steps: number of interpolated matrices to return (endpoints included)

    Returns:
        A list of `steps` interpolated camera matrices.
    """
    # NOTE: the previous return annotation claimed TensorType[3, 4], but the
    # function has always returned a Python list of matrices; the annotation is
    # fixed here to match the actual (and consumed) behavior.
    return [k_a * (1.0 - t) + k_b * t for t in np.linspace(0, 1, steps)]
def get_interpolated_poses_many(
    poses: TensorType["num_poses", 3, 4],
    Ks: TensorType["num_poses", 3, 3],
    steps_per_transition=10,
) -> Tuple[TensorType["num_poses", 3, 4], TensorType["num_poses", 3, 3]]:
    """Return interpolated poses for many camera poses.

    Args:
        poses: list of camera poses
        Ks: list of camera intrinsics
        steps_per_transition: number of steps per transition

    Returns:
        tuple of new poses and intrinsics
    """
    traj = []
    # BUG FIX: the intrinsics accumulator used to be named `Ks`, shadowing the
    # `Ks` argument — `Ks[idx]` then indexed the (initially empty) accumulator
    # and raised IndexError on the first iteration.
    ks_interp = []
    for idx in range(poses.shape[0] - 1):
        pose_a = poses[idx]
        pose_b = poses[idx + 1]
        traj += get_interpolated_poses(pose_a, pose_b, steps=steps_per_transition)
        ks_interp += get_interpolated_k(Ks[idx], Ks[idx + 1], steps=steps_per_transition)
    # The helpers above return numpy arrays; stack in numpy first, since
    # torch.stack only accepts tensors, then convert once.
    traj = torch.from_numpy(np.stack(traj, axis=0))
    ks_interp = torch.from_numpy(np.stack(ks_interp, axis=0))
    return traj, ks_interp
def normalize(x) -> TensorType[...]:
    """Return *x* rescaled to unit Euclidean norm."""
    norm = torch.linalg.norm(x)
    return x / norm
def viewmatrix(lookat, up, pos) -> TensorType[...]:
    """Returns a camera transformation matrix.

    Args:
        lookat: The direction the camera is looking.
        up: The upward direction of the camera.
        pos: The position of the camera.

    Returns:
        A camera transformation matrix (columns: right, up, forward, position).
    """
    forward = normalize(lookat)
    up_hint = normalize(up)
    # Build an orthonormal frame: right is perpendicular to the up hint and the
    # viewing direction; the true up re-orthogonalizes against both.
    right = normalize(torch.cross(up_hint, forward))
    true_up = normalize(torch.cross(forward, right))
    return torch.stack([right, true_up, forward, pos], 1)
def get_distortion_params(
    k1: float = 0.0,
    k2: float = 0.0,
    k3: float = 0.0,
    k4: float = 0.0,
    p1: float = 0.0,
    p2: float = 0.0,
) -> TensorType[...]:
    """Returns a distortion parameters matrix.

    Args:
        k1: The first radial distortion parameter.
        k2: The second radial distortion parameter.
        k3: The third radial distortion parameter.
        k4: The fourth radial distortion parameter.
        p1: The first tangential distortion parameter.
        p2: The second tangential distortion parameter.

    Returns:
        torch.Tensor: A distortion parameters matrix.
    """
    # Order matches the OpenCV-style convention used throughout this module.
    params = (k1, k2, k3, k4, p1, p2)
    return torch.Tensor(params)
@torch.jit.script
def _compute_residual_and_jacobian(
    x: torch.Tensor,
    y: torch.Tensor,
    xd: torch.Tensor,
    yd: torch.Tensor,
    distortion_params: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor,]:
    """Auxiliary function of radial_and_tangential_undistort() that computes residuals and jacobians.
    Adapted from MultiNeRF:
    https://github.com/google-research/multinerf/blob/b02228160d3179300c7d499dca28cb9ca3677f32/internal/camera_utils.py#L427-L474

    Args:
        x: The updated x coordinates.
        y: The updated y coordinates.
        xd: The distorted x coordinates.
        yd: The distorted y coordinates.
        distortion_params: The distortion parameters [k1, k2, k3, k4, p1, p2].

    Returns:
        The residuals (fx, fy) and jacobians (fx_x, fx_y, fy_x, fy_y).
    """
    k1 = distortion_params[..., 0]
    k2 = distortion_params[..., 1]
    k3 = distortion_params[..., 2]
    k4 = distortion_params[..., 3]
    p1 = distortion_params[..., 4]
    p2 = distortion_params[..., 5]

    # let r(x, y) = x^2 + y^2;
    #     d(x, y) = 1 + k1 * r(x, y) + k2 * r(x, y) ^2 + k3 * r(x, y)^3 +
    #                   k4 * r(x, y)^4;
    r = x * x + y * y
    # Horner evaluation of the radial polynomial.
    d = 1.0 + r * (k1 + r * (k2 + r * (k3 + r * k4)))

    # The perfect projection is:
    # xd = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2);
    # yd = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2);
    #
    # Let's define
    #
    # fx(x, y) = x * d(x, y) + 2 * p1 * x * y + p2 * (r(x, y) + 2 * x^2) - xd;
    # fy(x, y) = y * d(x, y) + 2 * p2 * x * y + p1 * (r(x, y) + 2 * y^2) - yd;
    #
    # We are looking for a solution that satisfies
    # fx(x, y) = fy(x, y) = 0;
    fx = d * x + 2 * p1 * x * y + p2 * (r + 2 * x * x) - xd
    fy = d * y + 2 * p2 * x * y + p1 * (r + 2 * y * y) - yd

    # Compute derivative of d over [x, y]
    d_r = k1 + r * (2.0 * k2 + r * (3.0 * k3 + r * 4.0 * k4))
    d_x = 2.0 * x * d_r
    d_y = 2.0 * y * d_r

    # Compute derivative of fx over x and y.
    fx_x = d + d_x * x + 2.0 * p1 * y + 6.0 * p2 * x
    fx_y = d_y * x + 2.0 * p1 * x + 2.0 * p2 * y

    # Compute derivative of fy over x and y.
    fy_x = d_x * y + 2.0 * p2 * y + 2.0 * p1 * x
    fy_y = d + d_y * y + 2.0 * p2 * x + 6.0 * p1 * y

    return fx, fy, fx_x, fx_y, fy_x, fy_y
@torch.jit.script
def radial_and_tangential_undistort(
    coords: torch.Tensor,
    distortion_params: torch.Tensor,
    eps: float = 1e-3,
    max_iterations: int = 10,
) -> torch.Tensor:
    """Computes undistorted coords given opencv distortion parameters.
    Adapted from MultiNeRF
    https://github.com/google-research/multinerf/blob/b02228160d3179300c7d499dca28cb9ca3677f32/internal/camera_utils.py#L477-L509

    Args:
        coords: The distorted coordinates.
        distortion_params: The distortion parameters [k1, k2, k3, k4, p1, p2].
        eps: The epsilon for the convergence.
        max_iterations: The maximum number of iterations to perform.

    Returns:
        The undistorted coordinates.
    """
    # Initialize from the distorted point.
    x = coords[..., 0]
    y = coords[..., 1]

    # 2D Newton iterations: solve fx(x, y) = fy(x, y) = 0 by inverting the 2x2
    # Jacobian analytically at each step.
    for _ in range(max_iterations):
        fx, fy, fx_x, fx_y, fy_x, fy_y = _compute_residual_and_jacobian(
            x=x, y=y, xd=coords[..., 0], yd=coords[..., 1], distortion_params=distortion_params
        )
        # Cramer's-rule solve of J * step = -f for the 2x2 Jacobian.
        denominator = fy_x * fx_y - fx_x * fy_y
        x_numerator = fx * fy_y - fy * fx_y
        y_numerator = fy * fx_x - fx * fy_x
        # Guard against a (near-)singular Jacobian: take no step there.
        step_x = torch.where(torch.abs(denominator) > eps, x_numerator / denominator, torch.zeros_like(denominator))
        step_y = torch.where(torch.abs(denominator) > eps, y_numerator / denominator, torch.zeros_like(denominator))

        x = x + step_x
        y = y + step_y

    return torch.stack([x, y], dim=-1)
def rotation_matrix(a: TensorType[3], b: TensorType[3]) -> TensorType[3, 3]:
    """Compute the rotation matrix that rotates vector a to vector b.

    Args:
        a: The vector to rotate.
        b: The vector to rotate to.

    Returns:
        The rotation matrix.
    """
    a = a / torch.linalg.norm(a)
    b = b / torch.linalg.norm(b)
    v = torch.cross(a, b)
    c = torch.dot(a, b)
    # If vectors are exactly opposite, we add a little noise to one of them
    # (the rotation axis is ill-defined at 180 degrees; the retry picks a nearby,
    # well-conditioned pair — note this branch is non-deterministic).
    if c < -1 + 1e-8:
        eps = (torch.rand(3) - 0.5) * 0.01
        return rotation_matrix(a + eps, b)
    s = torch.linalg.norm(v)
    # Cross-product (skew-symmetric) matrix of the rotation axis v.
    skew_sym_mat = torch.Tensor(
        [
            [0, -v[2], v[1]],
            [v[2], 0, -v[0]],
            [-v[1], v[0], 0],
        ]
    )
    # R = I + [v]_x + [v]_x^2 * (1 - c) / s^2, the closed-form rotation taking a
    # onto b; the 1e-8 keeps the division finite when a and b are nearly parallel.
    return torch.eye(3) + skew_sym_mat + skew_sym_mat @ skew_sym_mat * ((1 - c) / (s**2 + 1e-8))
def auto_orient_and_center_poses(
    poses: TensorType["num_poses":..., 4, 4], method: Literal["pca", "up", "none"] = "up", center_poses: bool = True
) -> Tuple[TensorType["num_poses":..., 3, 4], TensorType[3, 4]]:
    """Orients and centers the poses. We provide two methods for orientation: pca and up.

    pca: Orient the poses so that the principal component of the points is aligned with the axes.
        This method works well when all of the cameras are in the same plane.
    up: Orient the poses so that the average up vector is aligned with the z axis.
        This method works well when images are not at arbitrary angles.

    Args:
        poses: The poses to orient.
        method: The method to use for orientation.
        center_poses: If True, the poses are centered around the origin.

    Returns:
        Tuple of the oriented poses and the applied 3x4 transform.
    """
    translation = poses[..., :3, 3]

    mean_translation = torch.mean(translation, dim=0)
    translation_diff = translation - mean_translation

    # `translation` is reused below as the offset subtracted from every pose:
    # the camera centroid when centering, zero otherwise.
    if center_poses:
        translation = mean_translation
    else:
        translation = torch.zeros_like(mean_translation)

    if method == "pca":
        # Principal axes of the camera centers define the new world frame.
        _, eigvec = torch.linalg.eigh(translation_diff.T @ translation_diff)
        eigvec = torch.flip(eigvec, dims=(-1,))
        # Force a right-handed frame.
        if torch.linalg.det(eigvec) < 0:
            eigvec[:, 2] = -eigvec[:, 2]
        transform = torch.cat([eigvec, eigvec @ -translation[..., None]], dim=-1)
        oriented_poses = transform @ poses
        # Flip so the average camera "up" points in +z after the transform.
        if oriented_poses.mean(axis=0)[2, 1] < 0:
            oriented_poses[:, 1:3] = -1 * oriented_poses[:, 1:3]
    elif method == "up":
        # Rotate the mean camera up-axis onto the world z axis.
        up = torch.mean(poses[:, :3, 1], dim=0)
        up = up / torch.linalg.norm(up)
        rotation = rotation_matrix(up, torch.Tensor([0, 0, 1]))
        transform = torch.cat([rotation, rotation @ -translation[..., None]], dim=-1)
        oriented_poses = transform @ poses
    elif method == "none":
        # Translation-only transform (possibly identity when not centering).
        transform = torch.eye(4)
        transform[:3, 3] = -translation
        transform = transform[:3, :]
        oriented_poses = transform @ poses

    return oriented_poses, transform
================================================
FILE: AutoReconForDens3R/nerfstudio/cameras/cameras.py
================================================
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Camera Models
"""
import base64
import math
from dataclasses import dataclass
from enum import Enum, auto
from typing import Dict, List, Optional, Tuple, Union
import cv2
import torch
import torchvision
from torch.nn.functional import normalize
from torchtyping import TensorType
import nerfstudio.utils.poses as pose_utils
from nerfstudio.cameras import camera_utils
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.utils.tensor_dataclass import TensorDataclass
class CameraType(Enum):
    """Supported camera types."""

    # Standard pinhole/perspective projection.
    PERSPECTIVE = auto()
    # Fisheye projection (OpenCV-fisheye style distortion).
    FISHEYE = auto()
    # 360-degree equirectangular (panorama) projection.
    EQUIRECTANGULAR = auto()
# Mapping from COLMAP-style camera-model names to the CameraType used internally.
# All pinhole/radial variants collapse to PERSPECTIVE; distortion coefficients are
# carried separately.
CAMERA_MODEL_TO_TYPE = {
    "SIMPLE_PINHOLE": CameraType.PERSPECTIVE,
    "PINHOLE": CameraType.PERSPECTIVE,
    "SIMPLE_RADIAL": CameraType.PERSPECTIVE,
    "RADIAL": CameraType.PERSPECTIVE,
    "OPENCV": CameraType.PERSPECTIVE,
    "OPENCV_FISHEYE": CameraType.FISHEYE,
    "EQUIRECTANGULAR": CameraType.EQUIRECTANGULAR,
}
@dataclass(init=False)
class Cameras(TensorDataclass):
"""Dataparser outputs for the image dataset and the ray generator.
Note: currently only supports cameras with the same principal points and types. The reason we type
the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras
down the line in cases where your batches of camera data don't come from the same cameras.
If a single value is provided, it is broadcasted to all cameras.
Args:
camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format
fx: Focal length x
fy: Focal length y
cx: Principal point x
cy: Principal point y
width: Image width
height: Image height
distortion_params: OpenCV 6 radial distortion coefficients
camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.
times: Timestamps for each camera
"""
camera_to_worlds: TensorType["num_cameras":..., 3, 4]
fx: TensorType["num_cameras":..., 1]
fy: TensorType["num_cameras":..., 1]
cx: TensorType["num_cameras":..., 1]
cy: TensorType["num_cameras":..., 1]
width: TensorType["num_cameras":..., 1]
height: TensorType["num_cameras":..., 1]
distortion_params: Optional[TensorType["num_cameras":..., 6]]
camera_type: TensorType["num_cameras":..., 1]
times: Optional[TensorType["num_cameras", 1]]
    def __init__(
        self,
        camera_to_worlds: TensorType["batch_c2ws":..., 3, 4],
        fx: Union[TensorType["batch_fxs":..., 1], float],
        fy: Union[TensorType["batch_fys":..., 1], float],
        cx: Union[TensorType["batch_cxs":..., 1], float],
        cy: Union[TensorType["batch_cys":..., 1], float],
        width: Optional[Union[TensorType["batch_ws":..., 1], int]] = None,
        height: Optional[Union[TensorType["batch_hs":..., 1], int]] = None,
        distortion_params: Optional[TensorType["batch_dist_params":..., 6]] = None,
        camera_type: Optional[
            Union[
                TensorType["batch_cam_types":..., 1],
                int,
                List[CameraType],
                CameraType,
            ]
        ] = CameraType.PERSPECTIVE,
        times: Optional[TensorType["num_cameras"]] = None,
    ):
        """Initializes the Cameras object.

        Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]
        (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or
        TensorType[1] (in the case of the rest of the elements). The dimensions before that are
        considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast
        all the tensors to be the same batch dimension. This means you can use any combination of the
        input types in the function signature and it won't break. Your batch size for all tensors
        must be broadcastable to the same size, and the resulting number of batch dimensions will be
        the batch dimension with the largest number of dimensions.
        """
        # This will notify the tensordataclass that we have a field with more than 1 dimension
        self._field_custom_dimensions = {"camera_to_worlds": 2}

        self.camera_to_worlds = camera_to_worlds

        # fx fy calculation
        self.fx = self._init_get_fc_xy(fx, "fx")  # @dataclass's post_init will take care of broadcasting
        self.fy = self._init_get_fc_xy(fy, "fy")  # @dataclass's post_init will take care of broadcasting

        # cx cy calculation
        self.cx = self._init_get_fc_xy(cx, "cx")  # @dataclass's post_init will take care of broadcasting
        self.cy = self._init_get_fc_xy(cy, "cy")  # @dataclass's post_init will take care of broadcasting

        # Distortion Params Calculation:
        self.distortion_params = distortion_params  # @dataclass's post_init will take care of broadcasting

        # @dataclass's post_init will take care of broadcasting
        # Height/width default to 2*cy / 2*cx when not provided (principal point at image center).
        self.height = self._init_get_height_width(height, self.cy)
        self.width = self._init_get_height_width(width, self.cx)
        self.camera_type = self._init_get_camera_type(camera_type)
        self.times = self._init_get_times(times)

        self.__post_init__()  # This will do the dataclass post_init and broadcast all the tensors
def _init_get_fc_xy(self, fc_xy, name):
"""
Parses the input focal length / principle point x or y and returns a tensor of the correct shape
Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we
just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.
Args:
fc_xy: The focal length / principle point x or y
name: The name of the variable. Used for error messages
"""
if isinstance(fc_xy, float):
fc_xy = torch.Tensor([fc_xy], device=self.device)
elif isinstance(fc_xy, torch.Tensor):
if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:
fc_xy = fc_xy.unsqueeze(-1)
fc_xy = fc_xy.to(self.device)
else:
raise ValueError(f"{name} must be a float or tensor, got {type(fc_xy)}")
return fc_xy
    def _init_get_camera_type(
        self,
        camera_type: Union[
            TensorType["batch_cam_types":..., 1], TensorType["batch_cam_types":...], int, List[CameraType], CameraType
        ],
    ) -> TensorType["num_cameras":..., 1]:
        """
        Parses the __init__() argument camera_type

        Camera Type Calculation:
        If CameraType, convert to int and then to tensor, then broadcast to all cameras
        If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras
        If int, first go to tensor and then broadcast to all cameras
        If tensor, broadcast to all cameras

        Args:
            camera_type: camera_type argument from __init__()
        """
        if isinstance(camera_type, CameraType):
            # Single enum value shared by all cameras.
            camera_type = torch.tensor([camera_type.value], device=self.device)
        elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):
            # One enum value per camera, shape (num_cameras, 1).
            camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)
        elif isinstance(camera_type, int):
            camera_type = torch.tensor([camera_type], device=self.device)
        elif isinstance(camera_type, torch.Tensor):
            assert not torch.is_floating_point(
                camera_type
            ), f"camera_type tensor must be of type int, not: {camera_type.dtype}"
            camera_type = camera_type.to(self.device)
            # Ensure the trailing singleton dimension expected by broadcasting.
            if camera_type.ndim == 0 or camera_type.shape[-1] != 1:
                camera_type = camera_type.unsqueeze(-1)
            # assert torch.all(
            #     camera_type.view(-1)[0] == camera_type
            # ), "Batched cameras of different camera_types will be allowed in the future."
        else:
            raise ValueError(
                'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor["num_cameras"]. \
                    Received: '
                + str(type(camera_type))
            )
        return camera_type
    def _init_get_height_width(
        self,
        h_w: Union[TensorType["batch_hws":..., 1], TensorType["batch_hws":...], int, None],
        c_x_y: TensorType["batch_cxys":...],
    ) -> TensorType["num_cameras":..., 1]:
        """
        Parses the __init__() argument for height or width

        Height/Width Calculation:
        If int, first go to tensor and then broadcast to all cameras
        If tensor, broadcast to all cameras
        If none, use cx or cy * 2
        Else raise error

        Args:
            h_w: height or width argument from __init__()
            c_x_y: cx or cy for when h_w == None
        """
        if isinstance(h_w, int):
            h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)
        elif isinstance(h_w, torch.Tensor):
            assert not torch.is_floating_point(h_w), f"height and width tensor must be of type int, not: {h_w.dtype}"
            h_w = h_w.to(torch.int64).to(self.device)
            # Ensure the trailing singleton dimension expected by broadcasting.
            if h_w.ndim == 0 or h_w.shape[-1] != 1:
                h_w = h_w.unsqueeze(-1)
            # assert torch.all(h_w == h_w.view(-1)[0]), "Batched cameras of different h, w will be allowed in the future."
        elif h_w is None:
            # Assume the principal point is at the image center, so size = 2 * (cx or cy).
            # NOTE(review): the outer torch.Tensor(...) wrap copies the already-int64
            # tensor back to float32, so this branch yields a float tensor unlike the
            # int64 branches above — looks unintentional, confirm before relying on dtype.
            h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))
        else:
            raise ValueError("Height must be an int, tensor, or None, received: " + str(type(h_w)))
        return h_w
def _init_get_times(self, times):
if times is None:
times = None
elif isinstance(times, torch.Tensor):
if times.ndim == 0 or times.shape[-1] != 1:
times = times.unsqueeze(-1).to(self.device)
else:
raise ValueError(f"times must be None or a tensor, got {type(times)}")
return times
    @property
    def device(self):
        """Returns the device that the camera is on (taken from camera_to_worlds)."""
        return self.camera_to_worlds.device
    @property
    def image_height(self) -> TensorType["num_cameras":..., 1]:
        """Returns the height of the images."""
        return self.height
    @property
    def image_width(self) -> TensorType["num_cameras":..., 1]:
        """Returns the width of the images."""
        return self.width
@property
def is_jagged(self):
"""
Returns whether or not the cameras are "jagged" (i.e. the height and widths are different, meaning that
you cannot concatenate the image coordinate maps together)
"""
h_jagged = not torch.all(self.height == self.height.view(-1)[0])
w_jagged = not torch.all(self.width == self.width.view(-1)[0])
return h_jagged or w_jagged
def get_image_coords(
    self, pixel_offset: float = 0.5, index: Optional[Tuple] = None
) -> TensorType["height", "width", 2]:
    """This gets the image coordinates of one of the cameras in this object.

    If no index is specified, it will return the maximum possible sized height / width image coordinate map,
    by looking at the maximum height and width of all the cameras in this object.

    Args:
        pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)
        index: Tuple of indices into the batch dimensions of the camera. Defaults to None, which returns the 0th
            flattened camera

    Returns:
        Grid of image coordinates.
    """
    # The two branches previously duplicated the meshgrid/stack logic; only the
    # height/width selection actually differs, so compute those first.
    if index is None:
        # No index: build a grid large enough for the biggest camera in the batch.
        image_height = torch.max(self.image_height.view(-1))
        image_width = torch.max(self.image_width.view(-1))
    else:
        image_height = self.image_height[index].item()
        image_width = self.image_width[index].item()
    image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing="ij")
    # Stored as (y, x) coordinates; pixel_offset=0.5 samples pixel centers.
    image_coords = torch.stack(image_coords, dim=-1) + pixel_offset
    return image_coords
def generate_rays(  # pylint: disable=too-many-statements
    self,
    camera_indices: Union[TensorType["num_rays":..., "num_cameras_batch_dims"], int],
    coords: Optional[TensorType["num_rays":..., 2]] = None,
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    keep_shape: Optional[bool] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices.

    This function will standardize the input arguments and then call the _generate_rays_from_coords function
    to generate the rays. Our goal is to parse the arguments and then get them into the right shape:

        - camera_indices: (num_rays:..., num_cameras_batch_dims)
        - coords: (num_rays:..., 2)
        - camera_opt_to_camera: (num_rays:..., 3, 4) or None
        - distortion_params_delta: (num_rays:..., 6) or None

    Read the docstring for _generate_rays_from_coords for more information on how we generate the rays
    after we have standardized the arguments.

    We are only concerned about different combinations of camera_indices and coords matrices, and the following
    are the 4 cases we have to deal with:

        1. isinstance(camera_indices, int) and coords == None
            - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)
        2. isinstance(camera_indices, int) and coords != None
            - In this case, we broadcast camera_indices to the same batch dim as coords
        3. not isinstance(camera_indices, int) and coords == None
            - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast
              all our other args to match the new definition of num_rays := (h, w) + num_rays
        4. not isinstance(camera_indices, int) and coords != None
            - In this case, we have nothing to do, only check that the arguments are of the correct shape

    There is one more edge case we need to be careful with: when we have "jagged cameras" (ie: different heights
    and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.
    When coords == None (ie: when we render out the whole image associated with this camera), we run into problems
    since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,
    we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,
    regardless of the number of prepended extra batch dimensions in the camera_indices tensor.

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered.
        camera_opt_to_camera: Optional transform for the camera to world matrices.
        distortion_params_delta: Optional delta for the distortion parameters.
        keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise
            keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the
            camera_indices and coords tensors (if we can).
        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords.
    """
    # Check the argument types to make sure they're valid and all shaped correctly
    assert isinstance(camera_indices, (torch.Tensor, int)), "camera_indices must be a tensor or int"
    assert coords is None or isinstance(coords, torch.Tensor), "coords must be a tensor or None"
    assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)
    assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)
    if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):
        num_rays_shape = camera_indices.shape[:-1]
        errormsg = "Batch dims of inputs must match when inputs are all tensors"
        assert coords.shape[:-1] == num_rays_shape, errormsg
        assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg
        assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg
    # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later
    if not self.shape:
        cameras = self.reshape((1,))
        assert torch.all(
            torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0
        ), "Can only index into single camera with no batch dimensions if index is zero"
    else:
        cameras = self
    # If the camera indices are an int, then we need to make sure that the camera batch is 1D
    if isinstance(camera_indices, int):
        assert (
            len(cameras.shape) == 1
        ), "camera_indices must be a tensor if cameras are batched with more than 1 batch dimension"
        camera_indices = torch.tensor([camera_indices], device=cameras.device)
    assert camera_indices.shape[-1] == len(
        cameras.shape
    ), "camera_indices must have shape (num_rays:..., num_cameras_batch_dims)"
    # If keep_shape is True, then we need to make sure that the camera indices in question
    # are all the same height and width and can actually be batched while maintaining the image
    # shape
    if keep_shape is True:
        assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(
            cameras.width[camera_indices] == cameras.width[camera_indices[0]]
        ), "Can only keep shape if all cameras have the same height and width"
    # If the cameras don't all have same height / width, if coords is not none, we will need to generate
    # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.
    # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial
    if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):
        index_dim = camera_indices.shape[-1]
        camera_indices = camera_indices.reshape(-1, index_dim)
        # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them.
        # Each entry of _coords is a flattened (n_pixels_i, 2) map; sizes differ per camera.
        _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]
        # Repeat each camera index once per pixel of its own image so indices and coords align row-for-row.
        camera_indices = torch.cat(
            [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],
        )
        coords = torch.cat(_coords, dim=0)
        assert coords.shape[0] == camera_indices.shape[0]
    # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords
    # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,
    # each image in camera_indices has to have the same shape since otherwise we would have error'd when
    # we checked keep_shape is valid or we aren't jagged.
    if coords is None:
        index_dim = camera_indices.shape[-1]
        index = camera_indices.reshape(-1, index_dim)[0]
        coords: torch.Tensor = cameras.get_image_coords(index=tuple(index))  # (h, w, 2)
        coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,))  # (h, w, 1..., 2)
        coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,))  # (h, w, num_rays, 2)
        camera_opt_to_camera = (  # (h, w, num_rays, 3, 4) or None
            camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))
            if camera_opt_to_camera is not None
            else None
        )
        distortion_params_delta = (  # (h, w, num_rays, 6) or None
            distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))
            if distortion_params_delta is not None
            else None
        )
    # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims
    camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)
    # Checking our tensors have been standardized
    assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)
    assert camera_indices.shape[-1] == len(cameras.shape)
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]
    assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]
    # This will do the actual work of generating the rays now that we have standardized the inputs
    # raybundle.shape == (num_rays) when done
    # pylint: disable=protected-access
    raybundle = cameras._generate_rays_from_coords(
        camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion
    )
    # If we have mandated that we don't keep the shape, then we flatten
    if keep_shape is False:
        raybundle = raybundle.flatten()
    # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,
    # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour
    # that we haven't caught yet with tests
    return raybundle
# pylint: disable=too-many-statements
def _generate_rays_from_coords(
    self,
    camera_indices: TensorType["num_rays":..., "num_cameras_batch_dims"],
    coords: TensorType["num_rays":..., 2],
    camera_opt_to_camera: Optional[TensorType["num_rays":..., 3, 4]] = None,
    distortion_params_delta: Optional[TensorType["num_rays":..., 6]] = None,
    disable_distortion: bool = False,
) -> RayBundle:
    """Generates rays for the given camera indices and coords where self isn't jagged

    This is a fairly complex function, so let's break this down slowly.

    Shapes involved:
        - num_rays: This is your output raybundle shape. It dictates the number and shape of the rays generated
        - num_cameras_batch_dims: This is the number of dimensions of our camera

    Args:
        camera_indices: Camera indices of the flattened cameras object to generate rays for.
            The shape of this is such that indexing into camera_indices["num_rays":...] will return the
            index into each batch dimension of the camera in order to get the correct camera specified by
            "num_rays".

            Example:
                >>> cameras = Cameras(...)
                >>> cameras.shape
                    (2, 3, 4)
                >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3
                >>> camera_indices.shape
                    (3,)
                >>> coords = torch.tensor([1,1])
                >>> coords.shape
                    (2,)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                    # This will generate a RayBundle with a single ray for the
                    # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()
                >>> out_rays.shape
                    ()
                >>> camera_indices = torch.tensor([[0,0,0]])
                >>> camera_indices.shape
                    (1, 3)
                >>> coords = torch.tensor([[1,1]])
                >>> coords.shape
                    (1, 2)
                >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)
                    # This will generate a RayBundle with a single ray for the
                    # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)
                    # since we added an extra dimension in front of camera_indices
                >>> out_rays.shape
                    (1,)

            If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape

            The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the
            output shape and if you index into the output RayBundle with some indices [i:...], if you index into
            camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch
            indices into the original cameras object corresponding to that ray (ie: you will get the camera
            from our batched cameras corresponding to the ray at RayBundle[i:...]).

        coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning
            height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will
            get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].

        camera_opt_to_camera: Optional transform for the camera to world matrices.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].

        distortion_params_delta: Optional delta for the distortion parameters.
            In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you
            the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].

        disable_distortion: If True, disables distortion.

    Returns:
        Rays for the given camera indices and coords. RayBundle.shape == num_rays
    """
    # Make sure we're on the right devices
    camera_indices = camera_indices.to(self.device)
    coords = coords.to(self.device)
    # Checking to make sure everything is of the right shape and type
    num_rays_shape = camera_indices.shape[:-1]
    assert camera_indices.shape == num_rays_shape + (self.ndim,)
    assert coords.shape == num_rays_shape + (2,)
    assert coords.shape[-1] == 2
    assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)
    assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)
    # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all
    # of our output rays at each dimension of our cameras object
    true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]
    # Get all our focal lengths, principal points and make sure they are the right shapes
    y = coords[..., 0]  # (num_rays,) get rid of the last dimension
    x = coords[..., 1]  # (num_rays,) get rid of the last dimension
    fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1)  # (num_rays,)
    cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1)  # (num_rays,)
    assert (
        y.shape == num_rays_shape
        and x.shape == num_rays_shape
        and fx.shape == num_rays_shape
        and fy.shape == num_rays_shape
        and cx.shape == num_rays_shape
        and cy.shape == num_rays_shape
    ), (
        str(num_rays_shape)
        + str(y.shape)
        + str(x.shape)
        + str(fx.shape)
        + str(fy.shape)
        + str(cx.shape)
        + str(cy.shape)
    )
    # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)
    # Also make sure the shapes are correct.
    # The y term is negated here, pairing with the z = -1 assignment below for the perspective case.
    coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1)  # (num_rays, 2)
    coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1)  # (num_rays, 2)
    assert (
        coord.shape == num_rays_shape + (2,)
        and coord_x_offset.shape == num_rays_shape + (2,)
        and coord_y_offset.shape == num_rays_shape + (2,)
    )
    # Stack image coordinates and image coordinates offset by 1, check shapes too
    coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0)  # (3, num_rays, 2)
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)
    # Undistorts our images according to our distortion parameters
    if not disable_distortion:
        distortion_params = None
        if self.distortion_params is not None:
            distortion_params = self.distortion_params[true_indices]
            if distortion_params_delta is not None:
                distortion_params = distortion_params + distortion_params_delta
        elif distortion_params_delta is not None:
            distortion_params = distortion_params_delta
        # Do not apply distortion for equirectangular images
        if distortion_params is not None:
            mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
            coord_mask = torch.stack([mask, mask, mask], dim=0)
            if mask.any():
                coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(
                    coord_stack[coord_mask, :].reshape(3, -1, 2),
                    distortion_params[mask, :],
                ).reshape(-1, 2)
    # Make sure after we have undistorted our images, the shapes are still correct
    assert coord_stack.shape == (3,) + num_rays_shape + (2,)
    # Gets our directions for all our rays in camera coordinates and checks shapes at the end
    # Here, directions_stack is of shape (3, num_rays, 3)
    # directions_stack[0] is the direction for ray in camera coordinates
    # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x
    # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y
    cam_types = torch.unique(self.camera_type, sorted=False)
    directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)
    if CameraType.PERSPECTIVE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()
        directions_stack[..., 2][mask] = -1.0
    if CameraType.FISHEYE.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))
        theta = torch.clip(theta, 0.0, math.pi)
        sin_theta = torch.sin(theta)
        # NOTE(review): sin_theta / theta is 0/0 = NaN when theta == 0 (ray exactly through the
        # principal point); confirm whether callers can hit that pixel before relying on this path.
        directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()
        directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)
    if CameraType.EQUIRECTANGULAR.value in cam_types:
        mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1)  # (num_rays)
        mask = torch.stack([mask, mask, mask], dim=0)
        # For equirect, fx = fy = height = width/2
        # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2
        theta = -torch.pi * coord_stack[..., 0]  # minus sign for right-handed
        phi = torch.pi * (0.5 - coord_stack[..., 1])
        # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)
        directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()
        directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()
        directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()
    # Any camera type not filled in above is unsupported; fail loudly rather than emit garbage rays.
    for value in cam_types:
        if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:
            raise ValueError(f"Camera type {value} not supported.")
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)
    # Per-ray camera-to-world transforms, composing the pose-optimization delta when provided.
    c2w = self.camera_to_worlds[true_indices]
    assert c2w.shape == num_rays_shape + (3, 4)
    if camera_opt_to_camera is not None:
        c2w = pose_utils.multiply(c2w, camera_opt_to_camera)
    rotation = c2w[..., :3, :3]  # (..., 3, 3)
    assert rotation.shape == num_rays_shape + (3, 3)
    # Rotate the camera-frame directions into world space.
    directions_stack = torch.sum(
        directions_stack[..., None, :] * rotation, dim=-1
    )  # (..., 1, 3) * (..., 3, 3) -> (..., 3)
    # Norm of the un-offset ray (index 0 of the stack), captured before normalization.
    directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)
    directions_norm = directions_norm[0]
    directions_stack = normalize(directions_stack, dim=-1)
    assert directions_stack.shape == (3,) + num_rays_shape + (3,)
    origins = c2w[..., :3, 3]  # (..., 3)
    assert origins.shape == num_rays_shape + (3,)
    directions = directions_stack[0]
    assert directions.shape == num_rays_shape + (3,)
    # norms of the vector going between adjacent coords, giving us dx and dy per output ray
    dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1))  # ("num_rays":...,)
    dy = torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1))  # ("num_rays":...,)
    assert dx.shape == num_rays_shape and dy.shape == num_rays_shape
    pixel_area = (dx * dy)[..., None]  # ("num_rays":..., 1)
    assert pixel_area.shape == num_rays_shape + (1,)
    times = self.times[camera_indices, 0] if self.times is not None else None
    return RayBundle(
        origins=origins,
        directions=directions,
        pixel_area=pixel_area,
        camera_indices=camera_indices,
        directions_norm=directions_norm,
        times=times,
    )
def to_json(
    self, camera_idx: int, image: Optional[TensorType["height", "width", 3]] = None, max_size: Optional[int] = None
) -> Dict:
    """Convert a camera to a json dictionary.

    Args:
        camera_idx: Index of the camera to convert.
        image: An image in range [0, 1] that is encoded to a base64 string.
            NOTE(review): annotation previously claimed 2 channels; the JPEG
            encode below implies a (height, width, channels) image -- confirm.
        max_size: Max size to resize the image to if present.

    Returns:
        A JSON representation of the camera
    """
    flattened = self.flatten()
    json_ = {
        "type": "PinholeCamera",
        "cx": flattened[camera_idx].cx.item(),
        "cy": flattened[camera_idx].cy.item(),
        "fx": flattened[camera_idx].fx.item(),
        "fy": flattened[camera_idx].fy.item(),
        # NOTE(review): indexes self.camera_to_worlds directly while the other fields
        # index the flattened view -- only equivalent for 1D camera batches; confirm.
        "camera_to_world": self.camera_to_worlds[camera_idx].tolist(),
        "camera_index": camera_idx,
        "times": flattened[camera_idx].times.item() if self.times is not None else None,
    }
    if image is not None:
        # Encode the optional preview image as a base64 JPEG data URI.
        image_uint8 = (image * 255).detach().type(torch.uint8)
        if max_size is not None:
            # torchvision's resize expects CHW, so permute before and after.
            image_uint8 = image_uint8.permute(2, 0, 1)
            image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size)  # type: ignore
            image_uint8 = image_uint8.permute(1, 2, 0)
        image_uint8 = image_uint8.cpu().numpy()
        data = cv2.imencode(".jpg", image_uint8)[1].tobytes()
        json_["image"] = str("data:image/jpeg;base64," + base64.b64encode(data).decode("ascii"))
    return json_
def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
    """Assemble a pinhole intrinsics matrix K for every camera in the batch.

    Returns:
        Tensor of shape (*batch, 3, 3) with fx, fy on the diagonal,
        (cx, cy) in the last column, and K[2, 2] == 1.
    """
    intrinsics = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)
    intrinsics[..., 2, 2] = 1.0
    intrinsics[..., 0, 0] = self.fx.squeeze(-1)
    intrinsics[..., 1, 1] = self.fy.squeeze(-1)
    intrinsics[..., 0, 2] = self.cx.squeeze(-1)
    intrinsics[..., 1, 2] = self.cy.squeeze(-1)
    return intrinsics
def rescale_output_resolution(
    self, scaling_factor: Union[TensorType["num_cameras":...], TensorType["num_cameras":..., 1], float, int]
) -> None:
    """Rescale the output resolution of the cameras in place.

    Args:
        scaling_factor: Factor applied to intrinsics and image sizes. Either a
            scalar, or a tensor of shape ``self.shape`` or ``(*self.shape, 1)``.
    """
    # Normalize the scaling factor to shape (*self.shape, 1) so it broadcasts
    # against fx/fy/cx/cy, which all carry a trailing singleton dimension.
    if isinstance(scaling_factor, (float, int)):
        scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))
    elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:
        scaling_factor = scaling_factor.unsqueeze(-1)
    elif not (isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1)):
        raise ValueError(
            f"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}."
        )
    # Scale intrinsics and integer image sizes together so the camera model stays consistent.
    for attr in ("fx", "fy", "cx", "cy"):
        setattr(self, attr, getattr(self, attr) * scaling_factor)
    self.height = (self.height * scaling_factor).to(torch.int64)
    self.width = (self.width * scaling_factor).to(torch.int64)
================================================
FILE: AutoReconForDens3R/nerfstudio/cameras/lie_groups.py
================================================
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Helper for Lie group operations. Currently only used for pose optimization.
"""
import torch
from torchtyping import TensorType
# We make an exception on snake case conventions because SO3 != so3.
def exp_map_SO3xR3(tangent_vector: TensorType["b", 6]) -> TensorType["b", 3, 4]:  # pylint: disable=invalid-name
    """Compute the exponential map of the direct product group `SO(3) x R^3`.

    This can be used for learning pose deltas on SE(3), and is generally faster than `exp_map_SE3`:
    the translation passes through unchanged while the rotation is the SO(3) exponential
    (Rodrigues' formula, as in pytorch3d's bare-bones so3 map).

    Args:
        tangent_vector: Tangent vector; length-3 translations, followed by an `so(3)` tangent vector.

    Returns:
        [R|t] transformation matrices.
    """
    omega = tangent_vector[:, 3:]
    batch = omega.shape[0]
    # Clamp the squared norm so the divisions below stay finite at (near-)zero rotations.
    angle = torch.clamp((omega * omega).sum(1), 1e-4).sqrt()
    inv_angle = 1.0 / angle
    sin_coeff = inv_angle * angle.sin()
    cos_coeff = inv_angle * inv_angle * (1.0 - angle.cos())
    # Skew-symmetric cross-product matrices [omega]_x.
    hat = torch.zeros((batch, 3, 3), dtype=omega.dtype, device=omega.device)
    hat[:, 0, 1], hat[:, 0, 2] = -omega[:, 2], omega[:, 1]
    hat[:, 1, 0], hat[:, 1, 2] = omega[:, 2], -omega[:, 0]
    hat[:, 2, 0], hat[:, 2, 1] = -omega[:, 1], omega[:, 0]
    hat_squared = torch.bmm(hat, hat)
    out = torch.zeros(tangent_vector.shape[0], 3, 4, dtype=tangent_vector.dtype, device=tangent_vector.device)
    identity = torch.eye(3, dtype=omega.dtype, device=omega.device)[None]
    # Rodrigues: R = I + sin(theta)/theta * [w]_x + (1 - cos(theta))/theta^2 * [w]_x^2
    out[:, :3, :3] = identity + sin_coeff[:, None, None] * hat + cos_coeff[:, None, None] * hat_squared
    # Translation is copied straight through (direct-product structure).
    out[:, :3, 3] = tangent_vector[:, :3]
    return out
def exp_map_SE3(tangent_vector: TensorType["b", 6]) -> TensorType["b", 3, 4]:  # pylint: disable=invalid-name
    """Compute the exponential map `se(3) -> SE(3)`.

    This can be used for learning pose deltas on `SE(3)`.

    Args:
        tangent_vector: A tangent vector from `se(3)`; translation part first, rotation part last.

    Returns:
        [R|t] transformation matrices.
    """
    lin = tangent_vector[:, :3].view(-1, 3, 1)
    ang = tangent_vector[:, 3:].view(-1, 3, 1)
    theta = torch.linalg.norm(ang, dim=1).unsqueeze(1)
    theta2 = theta**2
    theta3 = theta**3
    # Near zero, swap in Taylor-series coefficients to avoid dividing by a tiny theta.
    near_zero = theta < 1e-2
    one = torch.ones(1, dtype=tangent_vector.dtype, device=tangent_vector.device)
    theta_nz = torch.where(near_zero, one, theta)
    theta2_nz = torch.where(near_zero, one, theta2)
    theta3_nz = torch.where(near_zero, one, theta3)
    # --- Rotation block (Rodrigues with series fallbacks) ---
    sine = theta.sin()
    cosine = torch.where(near_zero, 8 / (4 + theta2) - 1, theta.cos())
    sine_by_theta = torch.where(near_zero, 0.5 * cosine + 0.5, sine / theta_nz)
    one_minus_cosine_by_theta2 = torch.where(near_zero, 0.5 * sine_by_theta, (1 - cosine) / theta2_nz)
    ret = torch.zeros(tangent_vector.shape[0], 3, 4).to(dtype=tangent_vector.dtype, device=tangent_vector.device)
    # Outer-product term, then the cosine diagonal, then the skew-symmetric sine term.
    ret[:, :3, :3] = one_minus_cosine_by_theta2 * ang @ ang.transpose(1, 2)
    diagonal = cosine.view(-1)
    ret[:, 0, 0] += diagonal
    ret[:, 1, 1] += diagonal
    ret[:, 2, 2] += diagonal
    skew_part = sine_by_theta.view(-1, 1) * ang.view(-1, 3)
    ret[:, 0, 1] -= skew_part[:, 2]
    ret[:, 1, 0] += skew_part[:, 2]
    ret[:, 0, 2] += skew_part[:, 1]
    ret[:, 2, 0] -= skew_part[:, 1]
    ret[:, 1, 2] -= skew_part[:, 0]
    ret[:, 2, 1] += skew_part[:, 0]
    # --- Translation block: t = V(theta) @ lin, again with series fallbacks ---
    sine_by_theta = torch.where(near_zero, 1 - theta2 / 6, sine_by_theta)
    one_minus_cosine_by_theta2 = torch.where(near_zero, 0.5 - theta2 / 24, one_minus_cosine_by_theta2)
    theta_minus_sine_by_theta3 = torch.where(near_zero, 1.0 / 6 - theta2 / 120, (theta - sine) / theta3_nz)
    translation = sine_by_theta * lin
    translation = translation + one_minus_cosine_by_theta2 * torch.cross(ang, lin, dim=1)
    translation = translation + theta_minus_sine_by_theta3 * (ang @ (ang.transpose(1, 2) @ lin))
    ret[:, :, 3:] = translation
    return ret
================================================
FILE: AutoReconForDens3R/nerfstudio/cameras/rays.py
================================================
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Some ray datastructures.
"""
import random
import warnings
from dataclasses import dataclass
from typing import Callable, Dict, Optional, Tuple
import torch
from torchtyping import TensorType
from rich.console import Console
from nerfstudio.utils.math import Gaussians, conical_frustum_to_gaussian
from nerfstudio.utils.tensor_dataclass import TensorDataclass
# Shared console for user-facing warnings; fixed width keeps rich's wrapping deterministic.
CONSOLE = Console(width=120)
# Rich-markup warning printed when RaySamples.deltas contains negative values (see RaySamples.__post_init__).
NEGATIVE_MIN_DELTA_WARNING = ("[bold yellow]RaySamples.deltas includes negative values, "
                              "which might be caused by nears > fars / bad weights (e.g., all zeros) "
                              "/ numerical issues caused by nears being too closed with fars. "
                              "This will lead to negative weights and NaN interlevel loss.")
# Module-level latch so the warning above is only printed once per process.
_WARNED = False
@dataclass
class Frustums(TensorDataclass):
    """Describes region of space as a frustum."""

    origins: TensorType["bs":..., 3]
    """xyz coordinate for ray origin."""
    directions: TensorType["bs":..., 3]
    """Direction of ray."""
    starts: TensorType["bs":..., 1]
    """Where the frustum starts along a ray."""
    ends: TensorType["bs":..., 1]
    """Where the frustum ends along a ray."""
    pixel_area: TensorType["bs":..., 1]
    """Projected area of pixel a distance 1 away from origin."""
    offsets: Optional[TensorType["bs":..., 3]] = None
    """Offsets for each sample position"""

    def get_positions(self) -> TensorType[..., 3]:
        """Compute the "center" position of each frustum (ray midpoint; not weighted by mass).

        Returns:
            xyz positions.
        """
        midpoints = (self.starts + self.ends) / 2
        positions = self.origins + self.directions * midpoints
        if self.offsets is not None:
            positions = positions + self.offsets
        return positions

    def set_offsets(self, offsets):
        """Store per-sample position offsets applied by get_positions()."""
        self.offsets = offsets

    def get_start_positions(self) -> TensorType[..., 3]:
        """Compute the "start" position of each frustum.

        Start positions are used for MonoSDF because error-bounded sampling upsamples many
        times: merging two sample sets while keeping midpoints fixed is hard (midpoints move
        on every upsampling round, forcing a ~3x slower full re-evaluation), whereas after a
        merge the start points are unchanged and only the deltas differ, so previously
        computed sdf values can be reused.

        Returns:
            xyz positions.
        """
        return self.origins + self.directions * self.starts

    def get_gaussian_blob(self) -> Gaussians:
        """Approximate each conical frustum by a gaussian distribution.

        Returns:
            Conical frustums approximated by gaussian distribution.
        """
        # Cone radius is set such that the square pixel_area matches the cone area:
        # r = sqrt(pixel_area / pi); 1.7724538509055159 == sqrt(pi).
        cone_radius = torch.sqrt(self.pixel_area) / 1.7724538509055159
        if self.offsets is not None:
            raise NotImplementedError()
        return conical_frustum_to_gaussian(
            origins=self.origins,
            directions=self.directions,
            starts=self.starts,
            ends=self.ends,
            radius=cone_radius,
        )

    @classmethod
    def get_mock_frustum(cls, device="cpu") -> "Frustums":
        """Helper function to generate a placeholder frustum.

        Returns:
            A size 1 frustum with meaningless values.
        """
        return Frustums(
            origins=torch.ones((1, 3)).to(device),
            directions=torch.ones((1, 3)).to(device),
            starts=torch.ones((1, 1)).to(device),
            ends=torch.ones((1, 1)).to(device),
            pixel_area=torch.ones((1, 1)).to(device),
        )
@dataclass
class RaySamples(TensorDataclass):
    """Samples along a ray"""

    frustums: Frustums
    """Frustums along ray."""
    camera_indices: Optional[TensorType["bs":..., 1]] = None
    """Camera index."""
    deltas: Optional[TensorType["bs":..., 1]] = None
    """"width" of each sample."""
    spacing_starts: Optional[TensorType["bs":..., "num_samples", 1]] = None
    """Start of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling."""
    spacing_ends: Optional[TensorType["bs":..., "num_samples", 1]] = None
    """End of normalized bin edges along ray [0,1], before warping is applied, ie. linear in disparity sampling."""
    spacing_to_euclidean_fn: Optional[Callable] = None
    """Function to convert bins to euclidean distance."""
    metadata: Optional[Dict[str, TensorType["bs":..., "latent_dims"]]] = None
    """addtional information relevant to generating ray samples"""
    times: Optional[TensorType[..., 1]] = None
    """Times at which rays are sampled"""

    def __post_init__(self) -> None:
        """Sanitize samples: clamp negative deltas to 0, warning once globally.

        Negative deltas can appear when bin ends precede bin starts (e.g. due
        to float32 precision when nears/fars are nearly equal).
        """
        super().__post_init__()
        if self.deltas is not None:
            negative_deltas_mask = self.deltas < 0
            if negative_deltas_mask.any():
                n_negative_deltas = negative_deltas_mask.sum()
                self.deltas[negative_deltas_mask] = 0.
                global _WARNED  # TODO: use a context manager to count the number of time this warning is triggered
                if not _WARNED:
                    _WARNED = True
                    CONSOLE.print(NEGATIVE_MIN_DELTA_WARNING)
                    CONSOLE.print(f"[bold yellow]Truncate negative deltas to 0! ({n_negative_deltas} in total)")

    def get_weights(self, densities: TensorType[..., "num_samples", 1]) -> TensorType[..., "num_samples", 1]:
        """Return volume-rendering weights based on predicted densities.

        Args:
            densities: Predicted densities for samples along ray

        Returns:
            Weights for each sample
        """
        # Delegate to the shared implementation to avoid duplicating the
        # transmittance computation.
        weights, _ = self.get_weights_and_transmittance(densities)
        return weights

    def get_weights_and_transmittance(
        self, densities: TensorType[..., "num_samples", 1]
    ) -> Tuple[TensorType[..., "num_samples", 1], TensorType[..., "num_samples", 1]]:
        """Return weights and transmittance based on predicted densities.

        Args:
            densities: Predicted densities for samples along ray

        Returns:
            Weights and transmittance for each sample
        """
        delta_density = self.deltas * densities
        alphas = 1 - torch.exp(-delta_density)
        # Optical depth accumulated before each sample; the first sample sees
        # zero accumulated density, hence the prepended zeros.
        transmittance = torch.cumsum(delta_density[..., :-1, :], dim=-2)
        transmittance = torch.cat(
            [torch.zeros((*transmittance.shape[:1], 1, 1), device=densities.device), transmittance], dim=-2
        )
        transmittance = torch.exp(-transmittance)  # [..., "num_samples"]
        weights = alphas * transmittance  # [..., "num_samples"]
        weights = torch.nan_to_num(weights)  # nan weights might be caused by inf densities
        return weights, transmittance

    def get_weights_from_alphas(self, alphas: TensorType[..., "num_samples", 1]) -> TensorType[..., "num_samples", 1]:
        """Return weights based on predicted alphas.

        Args:
            alphas: Predicted alphas (maybe from sdf) for samples along ray

        Returns:
            Weights for each sample
        """
        # Delegate to the shared implementation to avoid duplicating the
        # transmittance computation.
        weights, _ = self.get_weights_and_transmittance_from_alphas(alphas)
        return weights

    def get_weights_and_transmittance_from_alphas(
        self, alphas: TensorType[..., "num_samples", 1]
    ) -> Tuple[TensorType[..., "num_samples", 1], TensorType[..., "num_samples", 1]]:
        """Return weights and transmittance based on predicted alphas.

        Args:
            alphas: Predicted alphas (maybe from sdf) for samples along ray

        Returns:
            Weights and transmittance for each sample
        """
        # 1e-7 keeps cumprod away from exact zeros; the prepended ones give
        # full transmittance before the first sample.
        transmittance = torch.cumprod(
            torch.cat([torch.ones((*alphas.shape[:1], 1, 1), device=alphas.device), 1.0 - alphas + 1e-7], 1), 1
        )  # [..., "num_samples"]
        weights = alphas * transmittance[:, :-1, :]  # [..., "num_samples"]
        weights = torch.nan_to_num(weights)
        return weights, transmittance
@dataclass
class RayBundle(TensorDataclass):
    """A bundle of ray parameters."""

    # TODO(ethan): make sure the sizes with ... are correct
    origins: TensorType[..., 3]
    """Ray origins (XYZ)"""
    directions: TensorType[..., 3]
    """Unit ray direction vector"""
    pixel_area: TensorType[..., 1]
    """Projected area of pixel a distance 1 away from origin"""
    directions_norm: Optional[TensorType[..., 1]] = None
    """Norm of ray direction vector before normalization"""
    camera_indices: Optional[TensorType[..., 1]] = None
    """Camera indices"""
    nears: Optional[TensorType[..., 1]] = None
    """Distance along ray to start sampling"""
    fars: Optional[TensorType[..., 1]] = None
    """Rays Distance along ray to stop sampling"""
    metadata: Optional[Dict[str, TensorType["bs":..., "latent_dims"]]] = None
    """Additional metadata or data needed for interpolation, will mimic shape of rays"""
    times: Optional[TensorType[..., 1]] = None
    """Times at which rays are sampled"""

    def set_camera_indices(self, camera_index: int) -> None:
        """Sets all of the camera indices to a specific camera index.

        Args:
            camera_index: Camera index.
        """
        self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index

    def __len__(self) -> int:
        """Number of rays in the bundle (product of all batch dimensions)."""
        num_rays = torch.numel(self.origins) // self.origins.shape[-1]
        return num_rays

    def sample(self, num_rays: int) -> "RayBundle":
        """Returns a RayBundle as a random subset of rays.

        Args:
            num_rays: Number of rays in output RayBundle

        Returns:
            RayBundle with subset of rays.
        """
        assert num_rays <= len(self)
        indices = random.sample(range(len(self)), k=num_rays)
        return self[indices]

    def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> "RayBundle":
        """Flattens RayBundle and extracts chunk given start and end indices.

        Args:
            start_idx: Start index of RayBundle chunk.
            end_idx: End index of RayBundle chunk.

        Returns:
            Flattened RayBundle with end_idx-start_idx rays.
        """
        return self.flatten()[start_idx:end_idx]

    def get_ray_samples(
        self,
        bin_starts: TensorType["bs":..., "num_samples", 1],
        bin_ends: TensorType["bs":..., "num_samples", 1],
        spacing_starts: Optional[TensorType["bs":..., "num_samples", 1]] = None,
        spacing_ends: Optional[TensorType["bs":..., "num_samples", 1]] = None,
        spacing_to_euclidean_fn: Optional[Callable] = None,
    ) -> RaySamples:
        """Produces samples for each ray by projecting points along the ray direction.

        Args:
            bin_starts: Distance from origin to start of bin.
            bin_ends: Distance from origin to end of bin.
            spacing_starts: Normalized bin start edges along ray [0,1], before warping.
            spacing_ends: Normalized bin end edges along ray [0,1], before warping.
            spacing_to_euclidean_fn: Function to convert bins to euclidean distance.

        Returns:
            Samples projected along ray.
        """
        # NOTE: deltas can come out negative when nears/fars are nearly equal
        # (float32 spacing breakdown, e.g. a ray grazing an aabb edge);
        # RaySamples.__post_init__ truncates such deltas to zero.
        deltas = bin_ends - bin_starts
        if self.camera_indices is not None:
            camera_indices = self.camera_indices[..., None]
        else:
            camera_indices = None
        shaped_raybundle_fields = self[..., None]
        frustums = Frustums(
            origins=shaped_raybundle_fields.origins,  # [..., 1, 3]
            directions=shaped_raybundle_fields.directions,  # [..., 1, 3]
            starts=bin_starts,  # [..., num_samples, 1]
            ends=bin_ends,  # [..., num_samples, 1]
            pixel_area=shaped_raybundle_fields.pixel_area,  # [..., 1, 1]
        )
        ray_samples = RaySamples(
            frustums=frustums,
            camera_indices=camera_indices,  # [..., 1, 1]
            deltas=deltas,  # [..., num_samples, 1]
            spacing_starts=spacing_starts,  # [..., num_samples, 1]
            spacing_ends=spacing_ends,  # [..., num_samples, 1]
            spacing_to_euclidean_fn=spacing_to_euclidean_fn,
            metadata=shaped_raybundle_fields.metadata,
            times=None if self.times is None else self.times[..., None],  # [..., 1, 1]
        )
        return ray_samples
================================================
FILE: AutoReconForDens3R/nerfstudio/configs/__init__.py
================================================
# Copyright 2022 The Nerfstudio Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
================================================
FILE: AutoReconForDens3R/nerfstudio/configs/autorecon/__init__.py
================================================
from __future__ import annotations
from typing import Dict, Sequence
from functools import partial
import tyro
from sklearn.decomposition import PCA
from nerfstudio.cameras.camera_optimizers import CameraOptimizerConfig
from nerfstudio.configs.base_config import (
Config,
SchedulerConfig,
TrainerConfig,
ViewerConfig,
)
from nerfstudio.data.datamanagers.base_datamanager import (
FlexibleDataManagerConfig,
VanillaDataManagerConfig,
)
from nerfstudio.data.datamanagers.semantic_datamanager import SemanticDataManagerConfig
from nerfstudio.data.datamanagers.autorecon_datamanager import AutoReconDataManagerConfig
from nerfstudio.data.datamanagers.variable_res_datamanager import (
VariableResDataManagerConfig,
)
from nerfstudio.data.dataparsers.blender_dataparser import BlenderDataParserConfig
from nerfstudio.data.dataparsers.dnerf_dataparser import DNeRFDataParserConfig
from nerfstudio.data.dataparsers.friends_dataparser import FriendsDataParserConfig
from nerfstudio.data.dataparsers.nerfstudio_dataparser import NerfstudioDataParserConfig
from nerfstudio.data.dataparsers.phototourism_dataparser import (
PhototourismDataParserConfig,
)
from nerfstudio.data.dataparsers.sdfstudio_dataparser import SDFStudioDataParserConfig
from nerfstudio.data.dataparsers.autorecon_dataparser import AutoReconDataParserConfig
from nerfstudio.data.utils.nerfstudio_collate import nerfstudio_collate
from nerfstudio.engine.optimizers import AdamOptimizerConfig, RAdamOptimizerConfig, AdamWOptimizerConfig
from nerfstudio.engine.schedulers import (
ExponentialSchedulerConfig,
MultiStepSchedulerConfig,
NeuSSchedulerConfig,
AutoReconSchedulerConfig
)
from nerfstudio.field_components.temporal_distortions import TemporalDistortionKind
from nerfstudio.fields.sdf_field import SDFFieldConfig
from nerfstudio.fields.feature_field import FeatureFieldConfig, FeatureSegFieldConfig
from nerfstudio.models.dto import DtoOModelConfig
from nerfstudio.models.instant_ngp import InstantNGPModelConfig
from nerfstudio.models.mipnerf import MipNerfModel
from nerfstudio.models.nerfacto import NerfactoModelConfig
from nerfstudio.models.neuralreconW import NeuralReconWModelConfig
from nerfstudio.models.neus import NeuSModelConfig
from nerfstudio.models.neus_acc import NeuSAccModelConfig
from nerfstudio.models.neus_facto import NeuSFactoModelConfig
from nerfstudio.models.neus_facto_dff import NeuSFactoDFFModelConfig
from nerfstudio.models.neus_facto_reg import NeuSFactoRegModelConfig
from nerfstudio.models.distilled_neus_facto import DistilledNeuSFactoModelConfig
from nerfstudio.models.semantic_nerfw import SemanticNerfWModelConfig
from nerfstudio.models.tensorf import TensoRFModelConfig
from nerfstudio.models.unisurf import UniSurfModelConfig
from nerfstudio.models.vanilla_nerf import NeRFModel, VanillaModelConfig
from nerfstudio.models.volsdf import VolSDFModelConfig
from nerfstudio.pipelines.base_pipeline import (
FlexibleInputPipelineConfig,
VanillaPipelineConfig,
)
from nerfstudio.pipelines.dynamic_batch import DynamicBatchPipelineConfig
from nerfstudio.utils.func_utils import get_first_element
from nerfstudio.utils.pointclouds import BasicPointClouds
from .baseline import neus_wbg, neus_wbg_mlp
from .common import (
neusfacto_autorecon, neus_facto_wbg, neus_facto_wbg_BgProbNet,
neus_facto_wbg_medium, neus_facto_wbg_tiny, neus_facto_wbg_tiny_long_schedule
)
from .distilled_neusfacto import distilled_neus_facto_wbg, debug_distilled_neus_facto_wbg
from .feature_field import neus_facto_wbg_fast_dff, neus_facto_dff_wbg
from .neusfacto_fast import neus_facto_wbg_fast
from .regularization import neus_facto_wbg_reg, neus_facto_wbg_reg_sep_plane, neus_facto_wbg_reg_sep_plane_nerf
from .semantic_nerf import autorecon_semantic_nerf
# Registry of AutoRecon method configurations, keyed by CLI method name.
# Each value is a fully-specified `Config` instance imported from the
# sibling modules above (baseline, common, distilled_neusfacto, ...).
method_configs: Dict[str, Config] = {
    "neus-wbg": neus_wbg,
    "neus-wbg_mlp": neus_wbg_mlp,
    "neus-facto-autorecon": neusfacto_autorecon,
    "neus-facto-wbg": neus_facto_wbg,
    "neus-facto-wbg_bg-prob-net": neus_facto_wbg_BgProbNet,
    "neus-facto-wbg_medium": neus_facto_wbg_medium,
    "neus-facto-wbg_tiny": neus_facto_wbg_tiny,
    "neus-facto-wbg_tiny_long-schedule": neus_facto_wbg_tiny_long_schedule,
    "distilled-neus-facto-wbg": distilled_neus_facto_wbg,
    "debug_distilled-neus-facto-wbg": debug_distilled_neus_facto_wbg,
    "neus-facto-wbg-fast_dff": neus_facto_wbg_fast_dff,
    "neus-facto-dff-wbg": neus_facto_dff_wbg,
    "neus-facto-wbg-fast": neus_facto_wbg_fast,
    "neus-facto-wbg-reg": neus_facto_wbg_reg,
    "neus-facto-wbg-reg_sep-plane": neus_facto_wbg_reg_sep_plane,
    "neus-facto-wbg-reg_sep-plane-nerf": neus_facto_wbg_reg_sep_plane_nerf,
    "autorecon_semantic-nerfw": autorecon_semantic_nerf
}
================================================
FILE: AutoReconForDens3R/nerfstudio/configs/autorecon/baseline.py
================================================
from . import *
# TODO: neus w/o hash grid + NeRF++ scene parameterization
# neus (MLP-based) w/ a BG Model (l2 scene contraction)
# Registered as "neus-wbg_mlp": NeuS with position encoding (no hash grid),
# an MLP background model, and a long (300k-iteration) schedule.
neus_wbg_mlp = Config(
    method_name="neus-wbg_mlp",
    trainer=TrainerConfig(
        steps_per_eval_image=2500,
        steps_per_eval_batch=2500,
        steps_per_save=50000,
        steps_per_eval_all_images=1000000,  # set to a very large model so we don't eval with all images
        max_num_iterations=300000,
        mixed_precision=False,
    ),
    pipeline=VanillaPipelineConfig(
        datamanager=VanillaDataManagerConfig(
            dataparser=SDFStudioDataParserConfig(),
            train_num_rays_per_batch=512,
            eval_num_rays_per_batch=512,
            camera_optimizer=CameraOptimizerConfig(
                mode="off", optimizer=AdamOptimizerConfig(lr=6e-4, eps=1e-8, weight_decay=1e-2)
            ),
        ),
        model=NeuSModelConfig(
            sdf_field=SDFFieldConfig(
                use_position_encoding=True,
                bias=0.2,
                beta_init=0.3,
                inside_outside=False,
            ),
            scene_contraction_order="none",
            bg_sampler_type="lin_disp",  # TODO: ablate "uniform_lin_disp"
            background_model="mlp",
            use_fg_aware_scene_contraction=False,  # TODO: True might lead to better results
            bg_use_appearance_embedding=False,
            use_average_appearance_embedding=False,
            eval_num_rays_per_chunk=1024
        ),
    ),
    # Foreground and background fields share the same optimizer/scheduler setup.
    optimizers={
        "fields": {
            "optimizer": AdamOptimizerConfig(lr=5e-4, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=5000, learning_rate_alpha=0.05, max_steps=300000),
        },
        "field_background": {
            "optimizer": AdamOptimizerConfig(lr=5e-4, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=5000, learning_rate_alpha=0.05, max_steps=300000),
        },
    },
    viewer=ViewerConfig(num_rays_per_chunk=1 << 15),
    vis="viewer",
)
# (deprecated) neus with a BG Model
# Registered as "neus-wbg": hash-grid NeuS with a grid background model,
# trained for 60k iterations.
neus_wbg = Config(
    method_name="neus-wbg",
    trainer=TrainerConfig(
        steps_per_eval_image=1000,
        steps_per_eval_batch=1000,
        steps_per_save=20000,
        steps_per_eval_all_images=1000000,  # set to a very large model so we don't eval with all images
        max_num_iterations=60001,  # TODO: 20001 is enough?
        mixed_precision=False,
    ),
    pipeline=VanillaPipelineConfig(
        datamanager=VanillaDataManagerConfig(
            dataparser=SDFStudioDataParserConfig(),
            train_num_rays_per_batch=1024,
            eval_num_rays_per_batch=1024,
            camera_optimizer=CameraOptimizerConfig(
                mode="off", optimizer=AdamOptimizerConfig(lr=6e-4, eps=1e-8, weight_decay=1e-2)
            ),
        ),
        model=NeuSModelConfig(
            sdf_field=SDFFieldConfig(
                use_grid_feature=True,
                num_layers=2,
                num_layers_color=2,
                hidden_dim=256,
                bias=0.5,
                beta_init=0.3,
                use_appearance_embedding=False,  # TODO: improve impl efficiency (currently zero embeddings are used -> disable completely!)
                inside_outside=False,  # only consider object-centric scenes
            ),
            cos_anneal_end=10000,
            bg_use_appearance_embedding=False,
            use_average_appearance_embedding=False,
            background_model="grid",
            # TODO: check bg model size
            eval_num_rays_per_chunk=1024,
        ),
    ),
    optimizers={  # TODO: use higher lr for grid feature
        "fields": {
            "optimizer": AdamOptimizerConfig(lr=5e-4, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=1500, learning_rate_alpha=0.05, max_steps=60001),
        },
        "field_background": {
            "optimizer": AdamOptimizerConfig(lr=5e-4, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=1500, learning_rate_alpha=0.05, max_steps=60001),
        },
    },
    viewer=ViewerConfig(num_rays_per_chunk=1 << 15),
    vis="viewer",
)
================================================
FILE: AutoReconForDens3R/nerfstudio/configs/autorecon/common.py
================================================
from . import *
# assume the fg object is bounded by a unit cube, and the entire scene is
# contracted to [-2, 2] as done in MipNeRF-360
# Registered as "neus-facto-autorecon": NeuS-facto without a separate
# background model (background_model="none"); uses the AutoRecon dataparser.
neusfacto_autorecon = Config(
    method_name="neus-facto-autorecon",
    trainer=TrainerConfig(
        steps_per_eval_image=1000,
        steps_per_eval_batch=1000,
        steps_per_save=20000,
        steps_per_eval_all_images=1000000,  # set to a very large model so we don't eval with all images
        max_num_iterations=60001,  # 20k iters is not enough for high quality reconstruction
        mixed_precision=True,
    ),
    pipeline=VanillaPipelineConfig(
        datamanager=VanillaDataManagerConfig(
            dataparser=AutoReconDataParserConfig(),
            train_num_rays_per_batch=2048,
            eval_num_rays_per_batch=1024,
            camera_optimizer=CameraOptimizerConfig(
                mode="off", optimizer=AdamOptimizerConfig(lr=6e-4, eps=1e-8, weight_decay=1e-2)
            ),
            eval_camera_res_scale_factor=1.0,
            # PCA / BasicPointClouds batch entries cannot be stacked; keep the
            # first element instead of collating.
            collate_fn=partial(nerfstudio_collate,
                               extra_mappings={PCA: get_first_element,
                                               BasicPointClouds: get_first_element})
        ),
        model=NeuSFactoModelConfig(
            sdf_field=SDFFieldConfig(
                use_position_encoding=True,
                use_grid_feature=True,
                num_layers=2,
                num_layers_color=2,
                hidden_dim=256,
                bias=0.5,
                beta_init=0.3,
                direction_encoding_type="sh",
                use_appearance_embedding=False,
                inside_outside=False,
                spatial_normalization_region='full',  # [-2, 2] -> [0, 1] for feature grid interpolation
            ),
            cos_anneal_end=10000,
            background_model="none",
            eval_num_rays_per_chunk=1024,
            proposal_use_uniform_sampler=False,
            proposal_net_spatial_normalization_region="full"  # [-2, 2] -> [0, 1] for feature grid interpolation
        ),
    ),
    optimizers={
        "proposal_networks": {  # fixed gamma=0.3 and n_milestones=3
            "optimizer": AdamOptimizerConfig(lr=1e-2, eps=1e-15),
            "scheduler": MultiStepSchedulerConfig(max_steps=60001),
        },
        "fields": {
            "optimizer": AdamOptimizerConfig(lr=2e-3, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=1500, learning_rate_alpha=0.05, max_steps=60001),
        },
    },
    viewer=ViewerConfig(num_rays_per_chunk=1 << 15),
    vis="viewer",
)
# neus-facto with a BG Model
# This model has inferior fg reconstructions comparing to the neus-facto model.
# Registered as "neus-facto-wbg": grid background model with fg-aware scene
# contraction and a uniform-linear-disparity background sampler.
neus_facto_wbg = Config(
    method_name="neus-facto-wbg",
    trainer=TrainerConfig(
        steps_per_eval_image=2500,
        steps_per_eval_batch=2500,
        steps_per_save=60000,
        steps_per_eval_all_images=1000000,  # set to a very large model so we don't eval with all images
        max_num_iterations=60001,
        mixed_precision=True,
    ),
    pipeline=VanillaPipelineConfig(
        datamanager=VanillaDataManagerConfig(
            # TODO: set dataparser args
            dataparser=AutoReconDataParserConfig(),  # NOTE: cannot set init args here, which would not be recognised.
            train_num_rays_per_batch=2048,
            eval_num_rays_per_batch=1024,
            camera_optimizer=CameraOptimizerConfig(
                mode="off", optimizer=AdamOptimizerConfig(lr=6e-4, eps=1e-8, weight_decay=1e-2)
            ),
            eval_camera_res_scale_factor=1.0,
            # PCA / BasicPointClouds batch entries cannot be stacked; keep the
            # first element instead of collating.
            collate_fn=partial(nerfstudio_collate,
                               extra_mappings={PCA: get_first_element,
                                               BasicPointClouds: get_first_element})
        ),
        model=NeuSFactoModelConfig(
            sdf_field=SDFFieldConfig(
                use_position_encoding=True,
                use_grid_feature=True,
                num_layers=2,
                num_layers_color=2,
                hidden_dim=256,
                bias=0.5,
                beta_init=0.3,
                direction_encoding_type="sh",
                use_appearance_embedding=False,  # TODO: improve impl efficiency (currently zero embeddings are used -> disable completely!)
                inside_outside=False,  # only consider object-centric scenes
            ),
            cos_anneal_end=10000,
            proposal_net_spatial_normalization_region="fg",
            eval_num_rays_per_chunk=1024,
            use_average_appearance_embedding=False,
            background_model="grid",
            bg_use_appearance_embedding=False,
            use_fg_aware_scene_contraction=True,
            fg_aware_scene_contraction_alpha=15.0,  # TODO: 15.0 -> 5.0
            bg_sampler_type="uniform_lin_disp",
            num_samples_outside=64,
        ),
    ),
    optimizers={
        "proposal_networks": {  # fixed gamma=0.3 and n_milestones=3
            "optimizer": AdamOptimizerConfig(lr=1e-2, eps=1e-15),
            "scheduler": MultiStepSchedulerConfig(max_steps=60001),
        },
        "fields": {
            "optimizer": AdamOptimizerConfig(lr=2e-3, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=1500, learning_rate_alpha=0.05, max_steps=60001),
        },
        "field_background": {
            "optimizer": AdamOptimizerConfig(lr=2e-3, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=1500, learning_rate_alpha=0.05, max_steps=60001),
        },
    },
    viewer=ViewerConfig(num_rays_per_chunk=1 << 15),
    vis="viewer",
)
# neus-facto with a BG Model (w/ proposal-net sampler)
# This model has inferior fg reconstructions comparing to the neus-facto model.
# Registered as "neus-facto-wbg_bg-prob-net": like neus_facto_wbg but the
# background is sampled with a dedicated proposal network
# (bg_sampler_type="proposal_network").
neus_facto_wbg_BgProbNet = Config(
    method_name="neus-facto-wbg_bg-prop-net",
    trainer=TrainerConfig(
        steps_per_eval_image=2500,
        steps_per_eval_batch=2500,
        steps_per_save=60000,
        steps_per_eval_all_images=1000000,  # set to a very large model so we don't eval with all images
        max_num_iterations=60001,
        mixed_precision=True,
    ),
    pipeline=VanillaPipelineConfig(
        datamanager=VanillaDataManagerConfig(
            # TODO: set dataparser args
            dataparser=AutoReconDataParserConfig(),  # NOTE: cannot set init args here, which would not be recognised.
            train_num_rays_per_batch=2048,
            eval_num_rays_per_batch=2048,
            camera_optimizer=CameraOptimizerConfig(
                mode="off", optimizer=AdamOptimizerConfig(lr=6e-4, eps=1e-8, weight_decay=1e-2)
            ),
            eval_camera_res_scale_factor=1.0,
            # PCA / BasicPointClouds batch entries cannot be stacked; keep the
            # first element instead of collating.
            collate_fn=partial(nerfstudio_collate,
                               extra_mappings={PCA: get_first_element,
                                               BasicPointClouds: get_first_element})
        ),
        model=NeuSFactoModelConfig(
            sdf_field=SDFFieldConfig(
                use_position_encoding=True,
                use_grid_feature=True,
                num_layers=2,
                num_layers_color=2,
                hidden_dim=256,
                bias=0.1,  # 0.5 -> 0.1
                beta_init=0.3,
                direction_encoding_type="sh",
                use_appearance_embedding=False,  # TODO: improve impl efficiency (currently zero embeddings are used -> disable completely!)
                inside_outside=False,  # only consider object-centric scenes
                hash_grid_progressive_training=False,
                hash_grid_progressive_training_iters=0,  # TODO: run ablation
                spatial_normalization_region="aabb",  # to better utilize the feature grid
                weight_norm=False,  # weight_norm might lead to NaNs during training
            ),
            cos_anneal_end=0,  # FIXME: 5000
            proposal_net_spatial_normalization_region="fg",
            eval_num_rays_per_chunk=1024,
            use_average_appearance_embedding=False,
            background_model="grid",
            bg_use_appearance_embedding=False,
            use_fg_aware_scene_contraction=True,
            fg_aware_scene_contraction_alpha=5.0,
            bg_sampler_type="proposal_network",
            num_samples_outside=16,  # TODO: run ablation (use a smaller number to avoid the fg modeling affected by bg)
            num_bg_proposal_samples_per_ray=(96, 64)
        ),
    ),
    optimizers={
        "proposal_networks": {  # fixed gamma=0.3 and n_milestones=3
            "optimizer": AdamOptimizerConfig(lr=1e-2, eps=1e-15),
            # "scheduler": MultiStepSchedulerConfig(max_steps=60001),
            "scheduler": None,
        },
        "proposal_networks_bg": {  # fixed gamma=0.3 and n_milestones=3
            "optimizer": AdamOptimizerConfig(lr=1e-2, eps=1e-15),
            "scheduler": None,
            # "scheduler": MultiStepSchedulerConfig(max_steps=30001),
        },
        "fields": {
            "optimizer": AdamOptimizerConfig(lr=2e-3, eps=1e-15),
            # "scheduler": NeuSSchedulerConfig(warm_up_end=1500, learning_rate_alpha=0.05, max_steps=60001),
            "scheduler": MultiStepSchedulerConfig(max_steps=30001),
        },
        "field_background": {
            "optimizer": AdamOptimizerConfig(lr=1e-2, eps=1e-15),
            "scheduler": None,
        },
    },
    viewer=ViewerConfig(num_rays_per_chunk=1 << 15),
    vis="viewer",
)
# medium-sized model (deprecated)
# Registered as "neus-facto-wbg_medium": reduced hidden/feature dims
# (256 -> 64) with a 40k-iteration schedule.
neus_facto_wbg_medium = Config(
    method_name="neus-facto-wbg_medium",
    trainer=TrainerConfig(
        steps_per_eval_image=2000,
        steps_per_eval_batch=2000,
        steps_per_save=40000,
        steps_per_eval_all_images=1000000,  # set to a very large model so we don't eval with all images
        max_num_iterations=40001,
        mixed_precision=True,
    ),
    pipeline=VanillaPipelineConfig(
        datamanager=VanillaDataManagerConfig(
            # TODO: set dataparser args
            dataparser=AutoReconDataParserConfig(),  # NOTE: cannot set init args here, which would not be recognised.
            train_num_rays_per_batch=2048,
            eval_num_rays_per_batch=1024,
            camera_optimizer=CameraOptimizerConfig(
                mode="off", optimizer=AdamOptimizerConfig(lr=6e-4, eps=1e-8, weight_decay=1e-2)
            ),
            eval_camera_res_scale_factor=1.0,
            # PCA / BasicPointClouds batch entries cannot be stacked; keep the
            # first element instead of collating.
            collate_fn=partial(nerfstudio_collate,
                               extra_mappings={PCA: get_first_element,
                                               BasicPointClouds: get_first_element})
        ),
        model=NeuSFactoModelConfig(
            sdf_field=SDFFieldConfig(
                use_position_encoding=False,
                use_grid_feature=True,
                hash_grid_num_levels=16,
                num_layers=2,
                num_layers_color=2,
                hidden_dim=64,  # 256 -> 64
                geo_feat_dim=64,  # 256 -> 64
                direction_encoding_type="sh",
                hidden_dim_color=64,  # 256 -> 64
                color_network_include_sdf=False,
                bias=0.5,
                beta_init=0.3,
                use_appearance_embedding=False,  # TODO: improve impl efficiency (currently zero embeddings are used -> disable completely!)
                inside_outside=False,  # only consider object-centric scenes
            ),
            bg_use_appearance_embedding=False,
            use_average_appearance_embedding=False,
            background_model="grid",
            bg_hash_grid_num_levels=16,
            eval_num_rays_per_chunk=1024,
            proposal_net_spatial_normalization_region="fg"
        ),
    ),
    optimizers={
        "proposal_networks": {  # fixed gamma=0.3 and n_milestones=3
            "optimizer": AdamOptimizerConfig(lr=1e-2, eps=1e-15),
            "scheduler": MultiStepSchedulerConfig(max_steps=40001),
        },
        "fields": {
            "optimizer": AdamOptimizerConfig(lr=2e-3, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=1000, learning_rate_alpha=0.05, max_steps=40001),
        },
        "field_background": {
            "optimizer": AdamOptimizerConfig(lr=2e-3, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=1000, learning_rate_alpha=0.05, max_steps=40001),
        },
    },
    viewer=ViewerConfig(num_rays_per_chunk=1 << 15),
    vis="viewer",
)
# tiny model (deprecated) - same model size as the one in NeuS2
# Registered as "neus-facto-wbg_tiny": NeuS2-sized network with a short
# 20k-iteration schedule.
neus_facto_wbg_tiny = Config(
    method_name="neus-facto-wbg_tiny",
    trainer=TrainerConfig(
        steps_per_eval_image=2000,
        steps_per_eval_batch=2000,
        steps_per_save=20000,
        steps_per_eval_all_images=1000000,  # set to a very large model so we don't eval with all images
        max_num_iterations=20001,
        mixed_precision=True,
    ),
    pipeline=VanillaPipelineConfig(
        datamanager=VanillaDataManagerConfig(
            # TODO: set dataparser args
            dataparser=AutoReconDataParserConfig(),  # NOTE: cannot set init args here, which would not be recognised.
            train_num_rays_per_batch=2048,
            eval_num_rays_per_batch=1024,
            camera_optimizer=CameraOptimizerConfig(
                mode="off", optimizer=AdamOptimizerConfig(lr=6e-4, eps=1e-8, weight_decay=1e-2)
            ),
            eval_camera_res_scale_factor=1.0,
            # PCA / BasicPointClouds batch entries cannot be stacked; keep the
            # first element instead of collating.
            collate_fn=partial(nerfstudio_collate,
                               extra_mappings={PCA: get_first_element,
                                               BasicPointClouds: get_first_element})
        ),
        model=NeuSFactoModelConfig(
            sdf_field=SDFFieldConfig(
                use_position_encoding=False,
                use_grid_feature=True,
                hash_grid_num_levels=14,  # NOTE: following NeuS2
                num_layers=1,  # NOTE: following NeuS2
                num_layers_color=2,
                hidden_dim=64,  # NOTE: following NeuS2
                geo_feat_dim=15,  # NOTE: following NeuS2
                direction_encoding_type="sh",  # NOTE: following NeuS2
                hidden_dim_color=64,  # NOTE: following NeuS2
                color_network_include_sdf=False,  # TODO: NeuS2 uses True
                bias=0.5,
                beta_init=0.3,
                use_appearance_embedding=False,  # TODO: improve impl efficiency (currently zero embeddings are used -> disable completely!)
                inside_outside=False,  # only consider object-centric scenes
            ),
            bg_use_appearance_embedding=False,
            use_average_appearance_embedding=False,
            background_model="grid",
            bg_hash_grid_num_levels=14,  # NOTE: following NeuS2
            eval_num_rays_per_chunk=1024,
            proposal_net_spatial_normalization_region="fg"
        ),
    ),
    optimizers={
        "proposal_networks": {  # fixed gamma=0.3 and n_milestones=3
            "optimizer": AdamOptimizerConfig(lr=1e-2, eps=1e-15),
            "scheduler": MultiStepSchedulerConfig(max_steps=20001),
        },
        "fields": {
            "optimizer": AdamOptimizerConfig(lr=5e-4, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=500, learning_rate_alpha=0.05, max_steps=20001),
        },
        "field_background": {
            "optimizer": AdamOptimizerConfig(lr=5e-4, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=500, learning_rate_alpha=0.05, max_steps=20001),
        },
    },
    viewer=ViewerConfig(num_rays_per_chunk=1 << 15),
    vis="viewer",
)
# tiny model (deprecated) - same model size as the one in NeuS2 with a longer training schedule
# Registered as "neus-facto-wbg_tiny_long-schedule": NeuS2-sized network,
# 30k iterations, with fg-aware scene contraction enabled.
neus_facto_wbg_tiny_long_schedule = Config(
    method_name="neus-facto-wbg_tiny_long-schedule",
    trainer=TrainerConfig(
        steps_per_eval_image=2500,
        steps_per_eval_batch=2500,
        steps_per_save=30000,
        steps_per_eval_all_images=1000000,  # set to a very large model so we don't eval with all images
        max_num_iterations=30001,
        mixed_precision=True,
    ),
    pipeline=VanillaPipelineConfig(
        datamanager=VanillaDataManagerConfig(
            dataparser=AutoReconDataParserConfig(),  # NOTE: cannot set init args here, which would not be recognised.
            train_num_rays_per_batch=2048,
            eval_num_rays_per_batch=1024,
            camera_optimizer=CameraOptimizerConfig(
                mode="off", optimizer=AdamOptimizerConfig(lr=6e-4, eps=1e-8, weight_decay=1e-2)
            ),
            eval_camera_res_scale_factor=1.0,
            # PCA / BasicPointClouds batch entries cannot be stacked; keep the
            # first element instead of collating.
            collate_fn=partial(nerfstudio_collate,
                               extra_mappings={PCA: get_first_element,
                                               BasicPointClouds: get_first_element})
        ),
        model=NeuSFactoModelConfig(
            sdf_field=SDFFieldConfig(
                use_position_encoding=True,
                use_grid_feature=True,
                hash_grid_num_levels=14,  # NOTE: following NeuS2
                num_layers=1,  # NOTE: following NeuS2
                num_layers_color=2,
                hidden_dim=64,  # NOTE: following NeuS2
                geo_feat_dim=15,  # NOTE: following NeuS2
                direction_encoding_type="sh",  # NOTE: following NeuS2
                hidden_dim_color=64,  # NOTE: following NeuS2
                color_network_include_sdf=False,  # TODO: NeuS2 uses True
                bias=0.5,
                beta_init=0.3,
                use_appearance_embedding=False,  # TODO: improve impl efficiency (currently zero embeddings are used -> disable completely!)
                inside_outside=False,  # only consider object-centric scenes
            ),
            cos_anneal_end=5000,
            use_fg_aware_scene_contraction=True,
            fg_aware_scene_contraction_alpha=15.0,
            bg_sampler_type="uniform_lin_disp",  # TODO: ablation: "proposal_network"
            num_samples_outside=64,
            bg_use_appearance_embedding=False,
            use_average_appearance_embedding=False,
            background_model="grid",
            bg_hash_grid_num_levels=14,  # NOTE: following NeuS2
            eval_num_rays_per_chunk=1024,
            proposal_net_spatial_normalization_region="fg"
        ),
    ),
    optimizers={
        "proposal_networks": {  # fixed gamma=0.3 and n_milestones=3
            "optimizer": AdamOptimizerConfig(lr=1e-2, eps=1e-15),
            "scheduler": MultiStepSchedulerConfig(max_steps=30001),
        },
        "fields": {
            "optimizer": AdamOptimizerConfig(lr=2e-3, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=750, learning_rate_alpha=0.05, max_steps=30001),
        },
        "field_background": {
            "optimizer": AdamOptimizerConfig(lr=2e-3, eps=1e-15),
            "scheduler": NeuSSchedulerConfig(warm_up_end=750, learning_rate_alpha=0.05, max_steps=30001),
        },
    },
    viewer=ViewerConfig(num_rays_per_chunk=1 << 15),
    vis="viewer",
)
================================================
FILE: AutoReconForDens3R/nerfstudio/configs/autorecon/distilled_neusfacto.py
================================================
from . import *
# 2-stages training of neus-facto:
# 1. train nerfacto; (nerfacto is very robust to the scene condition)
# 2. replace fg nerfacto_field with fg sdf_field (optionally supervised by pre-trained nerfacto)
distilled_neus_facto_wbg = Config(
method_name="distilled-neus-facto-wbg",
trainer=TrainerConfig(
steps_per_eval_image=2500,
steps_per_eval_batch=2500,
steps_per_save=30000,
steps_per_eval_all_images=1000000, # set to a very large model so we don't eval with all images
max_num_iterations=30001,
mixed_precision=True,
),
pipeline=VanillaPipelineConfig(
datamanager=VanillaDataManagerConfig(
dataparser=AutoReconDataParserConfig(), # NOTE: cannot set init args here, which would not be recognised.
train_num_rays_per_batch=4096, # 2048,
eval_num_rays_per_batch=4096, # 1024,
camera_optimizer=CameraOptimizerConfig(
mode="off", optimizer=AdamOptimizerConfig(lr=6e-4, eps=1e-8, weight_decay=1e-2)
),
eval_camera_res_scale_factor=1.0,
collate_fn=partial(nerfstudio_collate,
extra_mappings={PCA: get_first_element,
BasicPointClouds: get_first_element})
),
model=DistilledNeuSFactoModelConfig(
nerfacto_field_enabled=True, # for the 1st-stage training
nerfacto_spatial_normalization_region="fg", # bg nerfacto always uses full
proposal_net_use_separate_contraction=True,
proposal_net_contraction_scale_factor=0.25,
proposal_net_spatial_normalization_region="full",
proposal_use_uniform_sampler=False,
use_fg_aware_scene_contraction=True,
fg_aware_scene_contraction_alpha=15.0,
sdf_field=SDFFieldConfig( # NeuSFacto Medium
use_position_encoding=True,
use_grid_feature=True,
num_layers=2,
num_layers_color=2,
hidden_dim=64, # 256 -> 64
geo_feat_dim=64, # 256 -> 64 -> 15
bias=0.5,
beta_init=0.3,
direction_encoding_type="sh",
use_appearance_embedding=False, # TODO: improve impl efficiency (currently zero embeddings are used -> disable completely!)
inside_outside=False, # only consider object-centric scenes
gitextract_di20vn86/ ├── AutoReconForDens3R/ │ ├── .gitattributes │ ├── .github/ │ │ ├── ISSUE_TEMPLATE/ │ │ │ ├── bug-report.md │ │ │ └── feature_request.md │ │ └── workflows/ │ │ ├── core_code_checks.yml │ │ ├── publish.yml │ │ └── viewer_build_deploy.yml │ ├── .gitignore │ ├── .prettierrc.js │ ├── LICENSE │ ├── README.md │ ├── colab/ │ │ └── demo.ipynb │ ├── docs/ │ │ └── INSTALL.md │ ├── exps/ │ │ └── code-release/ │ │ ├── bmvs/ │ │ │ ├── scan1.sh │ │ │ ├── scan2.sh │ │ │ ├── scan3.sh │ │ │ ├── scan4.sh │ │ │ ├── scan5.sh │ │ │ └── scan6.sh │ │ ├── co3d_demo/ │ │ │ ├── scan1.sh │ │ │ ├── scan2.sh │ │ │ ├── scan3.sh │ │ │ ├── scan4.sh │ │ │ └── scan5.sh │ │ ├── run_dens3r_recon.sh │ │ └── run_pipeline_demo_low-res.sh │ ├── nerfstudio/ │ │ ├── __init__.py │ │ ├── cameras/ │ │ │ ├── __init__.py │ │ │ ├── camera_optimizers.py │ │ │ ├── camera_paths.py │ │ │ ├── camera_utils.py │ │ │ ├── cameras.py │ │ │ ├── lie_groups.py │ │ │ └── rays.py │ │ ├── configs/ │ │ │ ├── __init__.py │ │ │ ├── autorecon/ │ │ │ │ ├── __init__.py │ │ │ │ ├── baseline.py │ │ │ │ ├── common.py │ │ │ │ ├── distilled_neusfacto.py │ │ │ │ ├── feature_field.py │ │ │ │ ├── neusfacto_fast.py │ │ │ │ ├── regularization.py │ │ │ │ └── semantic_nerf.py │ │ │ ├── base_config.py │ │ │ ├── config_utils.py │ │ │ └── method_configs.py │ │ ├── engine/ │ │ │ ├── __init__.py │ │ │ ├── callbacks.py │ │ │ ├── optimizers.py │ │ │ ├── schedulers.py │ │ │ └── trainer.py │ │ ├── exporter/ │ │ │ ├── __init__.py │ │ │ ├── exporter_utils.py │ │ │ ├── mesh_culling_utils.py │ │ │ ├── texture_utils.py │ │ │ └── tsdf_utils.py │ │ ├── field_components/ │ │ │ ├── __init__.py │ │ │ ├── activations.py │ │ │ ├── base_field_component.py │ │ │ ├── embedding.py │ │ │ ├── encodings.py │ │ │ ├── field_heads.py │ │ │ ├── mlp.py │ │ │ ├── progressive_encoding.py │ │ │ ├── spatial_distortions.py │ │ │ └── temporal_distortions.py │ │ ├── fields/ │ │ │ ├── __init__.py │ │ │ ├── base_field.py │ │ │ ├── density_fields.py │ │ │ ├── 
feature_field.py │ │ │ ├── instant_ngp_field.py │ │ │ ├── nerfacto_field.py │ │ │ ├── nerfw_field.py │ │ │ ├── sdf_field.py │ │ │ ├── semantic_nerf_field.py │ │ │ ├── tensorf_field.py │ │ │ └── vanilla_nerf_field.py │ │ ├── model_components/ │ │ │ ├── __init__.py │ │ │ ├── losses.py │ │ │ ├── patch_warping.py │ │ │ ├── ray_generators.py │ │ │ ├── ray_samplers.py │ │ │ ├── renderers.py │ │ │ └── scene_colliders.py │ │ ├── models/ │ │ │ ├── __init__.py │ │ │ ├── base_model.py │ │ │ ├── base_surface_model.py │ │ │ ├── distilled_neus_facto.py │ │ │ ├── dto.py │ │ │ ├── instant_ngp.py │ │ │ ├── mipnerf.py │ │ │ ├── monosdf.py │ │ │ ├── nerfacto.py │ │ │ ├── neuralreconW.py │ │ │ ├── neus.py │ │ │ ├── neus_acc.py │ │ │ ├── neus_facto.py │ │ │ ├── neus_facto_dff.py │ │ │ ├── neus_facto_reg.py │ │ │ ├── semantic_nerfw.py │ │ │ ├── tensorf.py │ │ │ ├── unisurf.py │ │ │ ├── vanilla_nerf.py │ │ │ └── volsdf.py │ │ ├── pipelines/ │ │ │ ├── __init__.py │ │ │ ├── base_pipeline.py │ │ │ └── dynamic_batch.py │ │ ├── process_data/ │ │ │ ├── __init__.py │ │ │ ├── colmap_utils.py │ │ │ ├── hloc_utils.py │ │ │ ├── insta360_utils.py │ │ │ ├── metashape_utils.py │ │ │ ├── polycam_utils.py │ │ │ ├── process_data_utils.py │ │ │ └── record3d_utils.py │ │ ├── py.typed │ │ ├── utils/ │ │ │ ├── __init__.py │ │ │ ├── bilateral_solver.py │ │ │ ├── colormaps.py │ │ │ ├── colors.py │ │ │ ├── comms.py │ │ │ ├── decorators.py │ │ │ ├── eval_utils.py │ │ │ ├── func_utils.py │ │ │ ├── images.py │ │ │ ├── install_checks.py │ │ │ ├── io.py │ │ │ ├── marching_cubes.py │ │ │ ├── mask_utils.py │ │ │ ├── math.py │ │ │ ├── misc.py │ │ │ ├── plotly_utils.py │ │ │ ├── pointclouds.py │ │ │ ├── poses.py │ │ │ ├── printing.py │ │ │ ├── profiler.py │ │ │ ├── rich_utils.py │ │ │ ├── scheduler.py │ │ │ ├── scripts.py │ │ │ ├── tensor_dataclass.py │ │ │ ├── vis_utils.py │ │ │ └── writer.py │ │ └── viewer/ │ │ ├── __init__.py │ │ ├── app/ │ │ │ ├── .eslintrc.json │ │ │ ├── .gitignore │ │ │ ├── package.json │ │ │ ├── 
public/ │ │ │ │ ├── electron.js │ │ │ │ ├── index.html │ │ │ │ ├── manifest.json │ │ │ │ └── robots.txt │ │ │ ├── requirements.txt │ │ │ ├── run_deploy.py │ │ │ └── src/ │ │ │ ├── App.jsx │ │ │ ├── SceneNode.js │ │ │ ├── index.jsx │ │ │ ├── index.scss │ │ │ ├── modules/ │ │ │ │ ├── Banner/ │ │ │ │ │ ├── Banner.jsx │ │ │ │ │ └── index.jsx │ │ │ │ ├── ConfigPanel/ │ │ │ │ │ ├── ConfigPanel.jsx │ │ │ │ │ └── ConfigPanelSlice.js │ │ │ │ ├── LandingModal/ │ │ │ │ │ ├── LandingModal.jsx │ │ │ │ │ └── index.jsx │ │ │ │ ├── LogPanel/ │ │ │ │ │ └── LogPanel.jsx │ │ │ │ ├── RenderModal/ │ │ │ │ │ ├── RenderModal.jsx │ │ │ │ │ └── index.jsx │ │ │ │ ├── Scene/ │ │ │ │ │ ├── Scene.jsx │ │ │ │ │ └── drawing.js │ │ │ │ ├── SidePanel/ │ │ │ │ │ ├── CameraPanel/ │ │ │ │ │ │ ├── CameraHelper.js │ │ │ │ │ │ ├── CameraPanel.jsx │ │ │ │ │ │ ├── curve.js │ │ │ │ │ │ └── index.jsx │ │ │ │ │ ├── ExportPanel/ │ │ │ │ │ │ ├── ExportPanel.jsx │ │ │ │ │ │ ├── MeshSubPanel.jsx │ │ │ │ │ │ ├── PointcloudSubPanel.jsx │ │ │ │ │ │ └── index.jsx │ │ │ │ │ ├── ScenePanel/ │ │ │ │ │ │ ├── ScenePanel.jsx │ │ │ │ │ │ └── index.jsx │ │ │ │ │ ├── SidePanel.jsx │ │ │ │ │ └── StatusPanel/ │ │ │ │ │ ├── StatusPanel.jsx │ │ │ │ │ └── index.jsx │ │ │ │ ├── ViewerWindow/ │ │ │ │ │ ├── ViewerWindow.jsx │ │ │ │ │ └── ViewerWindowSlice.js │ │ │ │ ├── ViewportControlsModal/ │ │ │ │ │ ├── ViewportControlsModal.jsx │ │ │ │ │ └── index.jsx │ │ │ │ ├── WebRtcWindow/ │ │ │ │ │ └── WebRtcWindow.jsx │ │ │ │ ├── WebSocket/ │ │ │ │ │ └── WebSocket.jsx │ │ │ │ └── WebSocketUrlField.jsx │ │ │ ├── reducer.js │ │ │ ├── setupTests.js │ │ │ ├── store.js │ │ │ ├── subscriber.js │ │ │ ├── themes/ │ │ │ │ ├── leva_theme.json │ │ │ │ └── theme.ts │ │ │ └── utils.js │ │ └── server/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── path.py │ │ ├── server.py │ │ ├── state/ │ │ │ ├── node.py │ │ │ └── state_node.py │ │ ├── subprocess.py │ │ ├── utils.py │ │ ├── video_stream.py │ │ ├── viewer_utils.py │ │ └── visualizer.py │ ├── 
pyproject.toml │ ├── scripts/ │ │ ├── __init__.py │ │ ├── benchmarking/ │ │ │ ├── launch_eval_blender.sh │ │ │ └── launch_train_blender.sh │ │ ├── blender/ │ │ │ ├── render_mesh_blender.py │ │ │ └── render_pointcloud_blender.py │ │ ├── completions/ │ │ │ ├── .gitignore │ │ │ ├── __init__.py │ │ │ ├── install.py │ │ │ ├── setup.bash │ │ │ └── setup.zsh │ │ ├── datasets/ │ │ │ ├── extract_monocular_cues.py │ │ │ ├── process_nerfstudio_to_sdfstudio.py │ │ │ ├── process_neuralrgbd_to_sdfstudio.py │ │ │ ├── process_nuscenes_masks.py │ │ │ └── process_scannet_to_sdfstudio.py │ │ ├── docs/ │ │ │ ├── __init__.py │ │ │ ├── add_nb_tags.py │ │ │ └── build_docs.py │ │ ├── eval.py │ │ ├── eval_mask.py │ │ ├── exporter.py │ │ ├── extract_mesh.py │ │ ├── extract_volume.py │ │ ├── generate_kitti360_trainsplit.py │ │ ├── github/ │ │ │ ├── __init__.py │ │ │ └── run_actions.py │ │ ├── heritage_to_nerfstudio.py │ │ ├── licensing/ │ │ │ ├── copyright.txt │ │ │ └── license_headers.sh │ │ ├── preprocess/ │ │ │ └── preprocess_neus_pose.py │ │ ├── process_data.py │ │ ├── render.json │ │ ├── render.py │ │ ├── render_mesh.py │ │ ├── texture.py │ │ ├── train.py │ │ └── viewer/ │ │ └── view_dataset.py │ ├── setup.cfg │ ├── tests/ │ │ ├── cameras/ │ │ │ ├── test_cameras.py │ │ │ └── test_rays.py │ │ ├── field_components/ │ │ │ ├── test_embedding.py │ │ │ ├── test_encodings.py │ │ │ ├── test_field_outputs.py │ │ │ ├── test_fields.py │ │ │ ├── test_mlp.py │ │ │ └── test_temporal_distortions.py │ │ ├── model_components/ │ │ │ ├── test_ray_sampler.py │ │ │ └── test_renderers.py │ │ ├── test_train.py │ │ └── utils/ │ │ ├── test_poses.py │ │ ├── test_tensor_dataclass.py │ │ └── test_visualization.py │ └── third_party/ │ └── AutoDecomp/ │ ├── .gitignore │ ├── LICENSE │ ├── README.md │ ├── auto_decomp/ │ │ ├── __init__.py │ │ ├── cli/ │ │ │ └── inference_transformer.py │ │ ├── configs/ │ │ │ └── inference_transformer/ │ │ │ ├── config.yaml │ │ │ ├── cvpr.yaml │ │ │ ├── cvpr_idr.yaml │ │ │ ├── idr.yaml 
│ │ │ ├── low-res.yaml │ │ │ └── low-res_idr.yaml │ │ ├── decomp/ │ │ │ ├── preprocess.py │ │ │ └── transformer/ │ │ │ ├── __init__.py │ │ │ ├── dataset/ │ │ │ │ ├── __init__.py │ │ │ │ ├── ncut.py │ │ │ │ └── utils.py │ │ │ ├── lightning/ │ │ │ │ └── module.py │ │ │ ├── modeling/ │ │ │ │ ├── __init__.py │ │ │ │ ├── encoding.py │ │ │ │ ├── point_transformer.py │ │ │ │ ├── utils.py │ │ │ │ └── vision_transformer.py │ │ │ └── utils/ │ │ │ ├── postprocess.py │ │ │ └── saving.py │ │ ├── feature_extraction/ │ │ │ ├── __init__.py │ │ │ └── dino_vit/ │ │ │ ├── __init__.py │ │ │ ├── extract_features.py │ │ │ └── vit_extractor.py │ │ ├── sfm/ │ │ │ ├── __init__.py │ │ │ ├── colmap_from_co3d.py │ │ │ ├── colmap_from_idr.py │ │ │ ├── enums.py │ │ │ ├── pairs_from_sequential.py │ │ │ └── sfm.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── aggregation.py │ │ ├── cli.py │ │ ├── colmap.py │ │ ├── geometry/ │ │ │ ├── __init__.py │ │ │ ├── box3d.py │ │ │ ├── pointcloud/ │ │ │ │ ├── __init__.py │ │ │ │ ├── convert.py │ │ │ │ ├── misc.py │ │ │ │ ├── plane.py │ │ │ │ ├── pointcloud.py │ │ │ │ └── segmentation.py │ │ │ └── transform.py │ │ ├── misc.py │ │ ├── ray.py │ │ ├── tqdm.py │ │ └── viz_3d.py │ ├── ckpts/ │ │ └── no-chair.ckpt │ ├── docs/ │ │ └── INSTALL.md │ ├── requirements.txt │ ├── scripts/ │ │ ├── run_pipeline_demo.sh │ │ ├── run_pipeline_demo_low-res.sh │ │ ├── test_pipeline_bmvs/ │ │ │ ├── bmvs_scan1.sh │ │ │ ├── bmvs_scan2.sh │ │ │ ├── bmvs_scan3.sh │ │ │ ├── bmvs_scan4.sh │ │ │ ├── bmvs_scan5.sh │ │ │ ├── bmvs_scan6.sh │ │ │ ├── cvpr/ │ │ │ │ ├── README.md │ │ │ │ ├── bmvs_scan1.sh │ │ │ │ ├── bmvs_scan2.sh │ │ │ │ ├── bmvs_scan3.sh │ │ │ │ ├── bmvs_scan4.sh │ │ │ │ ├── bmvs_scan5.sh │ │ │ │ └── bmvs_scan6.sh │ │ │ └── low-res/ │ │ │ ├── README.md │ │ │ └── bmvs_scan1.sh │ │ └── test_pipeline_co3d_manual-poses/ │ │ ├── co3d_scan1.sh │ │ ├── co3d_scan2.sh │ │ ├── co3d_scan3.sh │ │ ├── co3d_scan4.sh │ │ ├── co3d_scan5.sh │ │ └── cvpr/ │ │ ├── co3d_scan1.sh │ │ ├── 
co3d_scan2.sh │ │ ├── co3d_scan3.sh │ │ ├── co3d_scan4.sh │ │ └── co3d_scan5.sh │ ├── setup.py │ └── third_party/ │ ├── Hierarchical-Localization/ │ │ ├── .gitattributes │ │ ├── .gitignore │ │ ├── .gitmodules │ │ ├── Dockerfile │ │ ├── LICENSE │ │ ├── README.md │ │ ├── datasets/ │ │ │ └── sacre_coeur/ │ │ │ └── README.md │ │ ├── demo.ipynb │ │ ├── hloc/ │ │ │ ├── __init__.py │ │ │ ├── colmap_from_nvm.py │ │ │ ├── extract_features.py │ │ │ ├── extractors/ │ │ │ │ ├── __init__.py │ │ │ │ ├── cosplace.py │ │ │ │ ├── d2net.py │ │ │ │ ├── dir.py │ │ │ │ ├── disk.py │ │ │ │ ├── dog.py │ │ │ │ ├── netvlad.py │ │ │ │ ├── openibl.py │ │ │ │ ├── r2d2.py │ │ │ │ └── superpoint.py │ │ │ ├── localize_inloc.py │ │ │ ├── localize_sfm.py │ │ │ ├── match_dense.py │ │ │ ├── match_features.py │ │ │ ├── matchers/ │ │ │ │ ├── __init__.py │ │ │ │ ├── adalam.py │ │ │ │ ├── loftr.py │ │ │ │ ├── nearest_neighbor.py │ │ │ │ └── superglue.py │ │ │ ├── pairs_from_covisibility.py │ │ │ ├── pairs_from_exhaustive.py │ │ │ ├── pairs_from_poses.py │ │ │ ├── pairs_from_retrieval.py │ │ │ ├── pipelines/ │ │ │ │ ├── 4Seasons/ │ │ │ │ │ ├── README.md │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── localize.py │ │ │ │ │ ├── prepare_reference.py │ │ │ │ │ └── utils.py │ │ │ │ ├── 7Scenes/ │ │ │ │ │ ├── README.md │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── create_gt_sfm.py │ │ │ │ │ ├── pipeline.py │ │ │ │ │ └── utils.py │ │ │ │ ├── Aachen/ │ │ │ │ │ ├── README.md │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── pipeline.py │ │ │ │ ├── Aachen_v1_1/ │ │ │ │ │ ├── README.md │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── pipeline.py │ │ │ │ │ └── pipeline_loftr.py │ │ │ │ ├── CMU/ │ │ │ │ │ ├── README.md │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── pipeline.py │ │ │ │ ├── Cambridge/ │ │ │ │ │ ├── README.md │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── pipeline.py │ │ │ │ │ └── utils.py │ │ │ │ ├── RobotCar/ │ │ │ │ │ ├── README.md │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── colmap_from_nvm.py │ │ │ │ │ └── pipeline.py │ │ │ │ └── __init__.py │ │ │ 
├── reconstruction.py │ │ │ ├── triangulation.py │ │ │ ├── utils/ │ │ │ │ ├── __init__.py │ │ │ │ ├── base_model.py │ │ │ │ ├── database.py │ │ │ │ ├── geometry.py │ │ │ │ ├── io.py │ │ │ │ ├── parsers.py │ │ │ │ ├── read_write_model.py │ │ │ │ ├── viz.py │ │ │ │ └── viz_3d.py │ │ │ └── visualization.py │ │ ├── pairs/ │ │ │ ├── aachen/ │ │ │ │ ├── pairs-db-covis20.txt │ │ │ │ ├── pairs-query-netvlad20.txt │ │ │ │ ├── pairs-query-netvlad30.txt │ │ │ │ └── pairs-query-netvlad50.txt │ │ │ ├── aachen_v1.1/ │ │ │ │ ├── pairs-db-covis20.txt │ │ │ │ └── pairs-query-netvlad50.txt │ │ │ └── inloc/ │ │ │ ├── pairs-query-netvlad20.txt │ │ │ ├── pairs-query-netvlad30-temporal.txt │ │ │ ├── pairs-query-netvlad30.txt │ │ │ ├── pairs-query-netvlad40-temporal.txt │ │ │ └── pairs-query-netvlad40.txt │ │ ├── pipeline_Aachen.ipynb │ │ ├── pipeline_InLoc.ipynb │ │ ├── pipeline_SfM.ipynb │ │ ├── requirements.txt │ │ ├── setup.py │ │ └── third_party/ │ │ ├── SuperGluePretrainedNetwork/ │ │ │ ├── .gitignore │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ ├── assets/ │ │ │ │ ├── megadepth_train_scenes.txt │ │ │ │ ├── megadepth_validation_scenes.txt │ │ │ │ ├── phototourism_sample_pairs.txt │ │ │ │ ├── phototourism_test_pairs.txt │ │ │ │ ├── phototourism_test_pairs_original.txt │ │ │ │ ├── scannet_sample_pairs_with_gt.txt │ │ │ │ ├── scannet_test_pairs_with_gt.txt │ │ │ │ ├── yfcc_test_pairs_with_gt.txt │ │ │ │ └── yfcc_test_pairs_with_gt_original.txt │ │ │ ├── demo_superglue.py │ │ │ ├── match_pairs.py │ │ │ ├── models/ │ │ │ │ ├── __init__.py │ │ │ │ ├── matching.py │ │ │ │ ├── superglue.py │ │ │ │ ├── superpoint.py │ │ │ │ ├── utils.py │ │ │ │ └── weights/ │ │ │ │ ├── superglue_indoor.pth │ │ │ │ ├── superglue_outdoor.pth │ │ │ │ └── superpoint_v1.pth │ │ │ └── requirements.txt │ │ ├── d2net/ │ │ │ ├── .gitignore │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ ├── extract_features.py │ │ │ ├── extract_hesaff.m │ │ │ ├── hpatches_sequences/ │ │ │ │ ├── HPatches-Sequences-Matching-Benchmark.ipynb 
│ │ │ │ ├── README.md │ │ │ │ ├── convert_to_png.sh │ │ │ │ ├── download.sh │ │ │ │ └── download_cache.sh │ │ │ ├── image_list_hpatches_sequences.txt │ │ │ ├── image_list_qualitative.txt │ │ │ ├── inloc/ │ │ │ │ ├── README.md │ │ │ │ ├── custom_demo.m │ │ │ │ ├── functions/ │ │ │ │ │ └── wustl_function/ │ │ │ │ │ ├── Features_WUSTL.m │ │ │ │ │ └── parfor_sparseGV.m │ │ │ │ ├── generate_list.m │ │ │ │ └── merge_files.m │ │ │ ├── megadepth_utils/ │ │ │ │ ├── preprocess_scene.py │ │ │ │ ├── preprocess_undistorted_megadepth.sh │ │ │ │ ├── train_scenes.txt │ │ │ │ ├── undistort_reconstructions.py │ │ │ │ └── valid_scenes.txt │ │ │ ├── qualitative/ │ │ │ │ └── Qualitative-Matches.ipynb │ │ │ └── train.py │ │ ├── deep-image-retrieval/ │ │ │ ├── .gitignore │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ └── dirtorch/ │ │ │ ├── datasets/ │ │ │ │ ├── __init__.py │ │ │ │ ├── __main__.py │ │ │ │ ├── create.py │ │ │ │ ├── dataset.py │ │ │ │ ├── downloader.py │ │ │ │ ├── generic.py │ │ │ │ ├── generic_func.py │ │ │ │ ├── landmarks.py │ │ │ │ ├── landmarks18.py │ │ │ │ ├── oxford.py │ │ │ │ └── paris.py │ │ │ ├── extract_features.py │ │ │ ├── extract_kapture.py │ │ │ ├── loss.py │ │ │ ├── nets/ │ │ │ │ ├── __init__.py │ │ │ │ ├── __main__.py │ │ │ │ ├── backbones/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── resnet.py │ │ │ │ │ └── resnext101_features.py │ │ │ │ ├── layers/ │ │ │ │ │ └── pooling.py │ │ │ │ ├── rmac_resnet.py │ │ │ │ ├── rmac_resnet_fpn.py │ │ │ │ └── rmac_resnext.py │ │ │ ├── test_dir.py │ │ │ └── utils/ │ │ │ ├── common.py │ │ │ ├── convenient.py │ │ │ ├── evaluation.py │ │ │ ├── funcs.py │ │ │ ├── pytorch_loader.py │ │ │ ├── transforms.py │ │ │ └── transforms_tools.py │ │ ├── disk/ │ │ │ ├── .gitignore │ │ │ ├── .gitmodules │ │ │ ├── LICENSE.txt │ │ │ ├── README.md │ │ │ ├── colmap/ │ │ │ │ ├── colmap/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── database.py │ │ │ │ │ ├── license.txt │ │ │ │ │ ├── read_dense.py │ │ │ │ │ └── read_model.py │ │ │ │ ├── colmap2dataset.py │ │ │ 
│ ├── h5_to_db.py │ │ │ │ └── merge_datasets.py │ │ │ ├── compute_validation_auc.py │ │ │ ├── detect.py │ │ │ ├── disk/ │ │ │ │ ├── __init__.py │ │ │ │ ├── common/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── errors.py │ │ │ │ │ ├── image.py │ │ │ │ │ ├── logger.py │ │ │ │ │ ├── structs.py │ │ │ │ │ └── vis.py │ │ │ │ ├── geom/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── distance_matrix.py │ │ │ │ │ ├── epi.py │ │ │ │ │ └── pose.py │ │ │ │ ├── loss/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── discrete_metric.py │ │ │ │ │ ├── pose_metric.py │ │ │ │ │ ├── ransac.py │ │ │ │ │ ├── reinforce.py │ │ │ │ │ └── rewards.py │ │ │ │ └── model/ │ │ │ │ ├── __init__.py │ │ │ │ ├── consistent_matcher.py │ │ │ │ ├── cycle_matcher.py │ │ │ │ ├── detector.py │ │ │ │ ├── disk.py │ │ │ │ └── nms.py │ │ │ ├── download_dataset │ │ │ ├── match.py │ │ │ ├── requirements.txt │ │ │ ├── train.py │ │ │ └── view_h5.py │ │ └── r2d2/ │ │ ├── LICENSE │ │ ├── NOTICE │ │ ├── README.md │ │ ├── datasets/ │ │ │ ├── __init__.py │ │ │ ├── aachen.py │ │ │ ├── dataset.py │ │ │ ├── imgfolder.py │ │ │ ├── pair_dataset.py │ │ │ └── web_images.py │ │ ├── download_training_data.sh │ │ ├── extract.py │ │ ├── extract_kapture.py │ │ ├── models/ │ │ │ ├── r2d2_WAF_N16.pt │ │ │ ├── r2d2_WASF_N16.pt │ │ │ └── r2d2_WASF_N8_big.pt │ │ ├── nets/ │ │ │ ├── ap_loss.py │ │ │ ├── losses.py │ │ │ ├── patchnet.py │ │ │ ├── reliability_loss.py │ │ │ ├── repeatability_loss.py │ │ │ └── sampler.py │ │ ├── results/ │ │ │ ├── r2d2_WAF_N16.scale-0.3-1.npy │ │ │ ├── r2d2_WAF_N16.size-256-1024.npy │ │ │ ├── r2d2_WASF_N16.scale-0.3-1.npy │ │ │ ├── r2d2_WASF_N16.size-256-1024.npy │ │ │ └── r2d2_W_N16.scale-0.3-1.npy │ │ ├── tools/ │ │ │ ├── common.py │ │ │ ├── dataloader.py │ │ │ ├── trainer.py │ │ │ ├── transforms.py │ │ │ ├── transforms_tools.py │ │ │ └── viz.py │ │ ├── train.py │ │ └── viz_heatmaps.py │ └── LoFTR/ │ ├── .gitignore │ ├── .gitmodules │ ├── LICENSE │ ├── README.md │ ├── assets/ │ │ ├── megadepth_test_1500_scene_info/ │ │ │ ├── 
0015_0.1_0.3.npz │ │ │ ├── 0015_0.3_0.5.npz │ │ │ ├── 0022_0.1_0.3.npz │ │ │ ├── 0022_0.3_0.5.npz │ │ │ ├── 0022_0.5_0.7.npz │ │ │ └── megadepth_test_1500.txt │ │ └── scannet_test_1500/ │ │ ├── intrinsics.npz │ │ ├── scannet_test.txt │ │ ├── statistics.json │ │ └── test.npz │ ├── configs/ │ │ └── loftr/ │ │ ├── indoor/ │ │ │ ├── buggy_pos_enc/ │ │ │ │ ├── loftr_ds.py │ │ │ │ ├── loftr_ds_dense.py │ │ │ │ ├── loftr_ot.py │ │ │ │ └── loftr_ot_dense.py │ │ │ ├── debug/ │ │ │ │ └── .gitignore │ │ │ ├── loftr_ds.py │ │ │ ├── loftr_ds_dense.py │ │ │ ├── loftr_ot.py │ │ │ ├── loftr_ot_dense.py │ │ │ └── scannet/ │ │ │ ├── loftr_ds_eval.py │ │ │ └── loftr_ds_eval_new.py │ │ └── outdoor/ │ │ ├── buggy_pos_enc/ │ │ │ ├── loftr_ds.py │ │ │ ├── loftr_ds_dense.py │ │ │ ├── loftr_ot.py │ │ │ └── loftr_ot_dense.py │ │ ├── debug/ │ │ │ └── .gitignore │ │ ├── loftr_ds.py │ │ ├── loftr_ds_dense.py │ │ ├── loftr_ot.py │ │ └── loftr_ot_dense.py │ ├── demo/ │ │ ├── demo_loftr.py │ │ └── run_demo.sh │ ├── docs/ │ │ └── TRAINING.md │ ├── environment.yaml │ ├── loftr/ │ │ ├── __init__.py │ │ ├── config/ │ │ │ └── default.py │ │ ├── datasets/ │ │ │ ├── megadepth.py │ │ │ ├── sampler.py │ │ │ └── scannet.py │ │ ├── lightning/ │ │ │ ├── data.py │ │ │ └── lightning_loftr.py │ │ ├── loftr/ │ │ │ ├── __init__.py │ │ │ ├── backbone/ │ │ │ │ ├── __init__.py │ │ │ │ └── resnet_fpn.py │ │ │ ├── loftr.py │ │ │ ├── loftr_module/ │ │ │ │ ├── __init__.py │ │ │ │ ├── fine_preprocess.py │ │ │ │ ├── linear_attention.py │ │ │ │ └── transformer.py │ │ │ └── utils/ │ │ │ ├── coarse_matching.py │ │ │ ├── cvpr_ds_config.py │ │ │ ├── fine_matching.py │ │ │ ├── geometry.py │ │ │ ├── position_encoding.py │ │ │ └── supervision.py │ │ ├── losses/ │ │ │ └── loftr_loss.py │ │ ├── optimizers/ │ │ │ └── __init__.py │ │ ├── tools/ │ │ │ └── hloc_match_features.py │ │ └── utils/ │ │ ├── augment.py │ │ ├── comm.py │ │ ├── dataloader.py │ │ ├── dataset.py │ │ ├── metrics.py │ │ ├── misc.py │ │ ├── plotting.py │ │ └── 
profiler.py │ ├── requirements.txt │ ├── scripts/ │ │ ├── reproduce_test/ │ │ │ ├── indoor_ds.sh │ │ │ ├── indoor_ds_new.sh │ │ │ ├── indoor_ot.sh │ │ │ ├── outdoor_ds.sh │ │ │ └── outdoor_ot.sh │ │ └── reproduce_train/ │ │ ├── debug/ │ │ │ └── .gitignore │ │ ├── indoor_ds.sh │ │ ├── indoor_ot.sh │ │ ├── outdoor_ds.sh │ │ └── outdoor_ot.sh │ ├── setup.py │ ├── test.py │ └── train.py ├── LICENSE ├── NOTICE ├── README.md ├── croco/ │ ├── .gitignore │ ├── LICENSE │ ├── NOTICE │ ├── README.MD │ ├── datasets/ │ │ ├── __init__.py │ │ ├── crops/ │ │ │ ├── README.MD │ │ │ └── extract_crops_from_images.py │ │ ├── habitat_sim/ │ │ │ ├── README.MD │ │ │ ├── __init__.py │ │ │ ├── generate_from_metadata.py │ │ │ ├── generate_from_metadata_files.py │ │ │ ├── generate_multiview_images.py │ │ │ ├── multiview_habitat_sim_generator.py │ │ │ ├── pack_metadata_files.py │ │ │ └── paths.py │ │ ├── pairs_dataset.py │ │ └── transforms.py │ ├── models/ │ │ ├── blocks.py │ │ ├── criterion.py │ │ ├── croco.py │ │ ├── croco_downstream.py │ │ ├── curope/ │ │ │ ├── __init__.py │ │ │ ├── curope.cpp │ │ │ ├── curope2d.py │ │ │ ├── kernels.cu │ │ │ └── setup.py │ │ ├── dpt_block.py │ │ ├── head_downstream.py │ │ ├── masking.py │ │ └── pos_embed.py │ ├── pretrain.py │ ├── stereoflow/ │ │ ├── README.MD │ │ ├── augmentor.py │ │ ├── criterion.py │ │ ├── datasets_flow.py │ │ ├── datasets_stereo.py │ │ ├── download_model.sh │ │ ├── engine.py │ │ ├── test.py │ │ └── train.py │ └── utils/ │ └── misc.py ├── dust3r/ │ ├── __init__.py │ ├── datasets/ │ │ ├── __init__.py │ │ ├── base/ │ │ │ ├── __init__.py │ │ │ ├── base_stereo_view_dataset.py │ │ │ ├── batched_sampler.py │ │ │ ├── easy_dataset.py │ │ │ └── mast3r_base_stereo_view_dataset.py │ │ └── utils/ │ │ ├── __init__.py │ │ ├── cropping.py │ │ ├── mast3r_cropping.py │ │ └── transforms.py │ ├── heads/ │ │ ├── __init__.py │ │ ├── dpt_head.py │ │ ├── linear_head.py │ │ └── postprocess.py │ ├── image_pairs.py │ ├── inference.py │ ├── losses.py │ ├── 
model.py │ ├── optim_factory.py │ ├── patch_embed.py │ ├── post_process.py │ ├── utils/ │ │ ├── __init__.py │ │ ├── device.py │ │ ├── geometry.py │ │ ├── image.py │ │ ├── misc.py │ │ ├── parallel.py │ │ ├── path_to_croco.py │ │ └── read_write_model.py │ └── viz.py ├── infer/ │ ├── demo_online.py │ ├── demo_utils.py │ ├── dens3r_recon.py │ ├── eval_scripts/ │ │ ├── eval_depth.py │ │ ├── eval_matching.py │ │ ├── eval_normal.py │ │ └── matching_metrics.py │ └── infer_normal_pts3d.py ├── mast3r/ │ ├── .gitignore │ ├── __init__.py │ ├── catmlp_dpt_head.py │ ├── colmap_utils/ │ │ ├── __init__.py │ │ ├── database.py │ │ └── database_utils.py │ ├── fast_nn.py │ ├── image_pairs.py │ ├── losses.py │ ├── model.py │ ├── retrieval/ │ │ ├── graph.py │ │ ├── model.py │ │ └── processor.py │ ├── ssim.py │ └── utils/ │ ├── __init__.py │ ├── coarse_to_fine.py │ ├── collate.py │ ├── misc.py │ └── path_to_dust3r.py └── requirements.txt
Showing preview only (324K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (3758 symbols across 426 files)
FILE: AutoReconForDens3R/nerfstudio/cameras/camera_optimizers.py
class CameraOptimizerConfig (line 39) | class CameraOptimizerConfig(cfg.InstantiateConfig):
class CameraOptimizer (line 64) | class CameraOptimizer(nn.Module):
method __init__ (line 69) | def __init__(
method forward (line 99) | def forward(
FILE: AutoReconForDens3R/nerfstudio/cameras/camera_paths.py
function get_interpolated_camera_path (line 30) | def get_interpolated_camera_path(cameras: Cameras, steps: int) -> Cameras:
function get_spiral_path (line 48) | def get_spiral_path(
function get_path_from_json (line 114) | def get_path_from_json(camera_path: Dict[str, Any]) -> Cameras:
function get_path_from_npz (line 152) | def get_path_from_npz(camera_path: Dict[str, Any]) -> Cameras:
FILE: AutoReconForDens3R/nerfstudio/cameras/camera_utils.py
function unit_vector (line 30) | def unit_vector(data, axis: Optional[int] = None) -> np.ndarray:
function quaternion_from_matrix (line 49) | def quaternion_from_matrix(matrix, isprecise: bool = False) -> np.ndarray:
function quaternion_slerp (line 105) | def quaternion_slerp(quat0, quat1, fraction: float, spin: int = 0, short...
function quaternion_matrix (line 139) | def quaternion_matrix(quaternion) -> np.ndarray:
function get_interpolated_poses (line 161) | def get_interpolated_poses(pose_a, pose_b, steps: int = 10) -> List[float]:
function get_interpolated_k (line 185) | def get_interpolated_k(k_a, k_b, steps: int = 10) -> TensorType[3, 4]:
function get_interpolated_poses_many (line 202) | def get_interpolated_poses_many(
function normalize (line 228) | def normalize(x) -> TensorType[...]:
function viewmatrix (line 233) | def viewmatrix(lookat, up, pos) -> TensorType[...]:
function get_distortion_params (line 252) | def get_distortion_params(
function _compute_residual_and_jacobian (line 276) | def _compute_residual_and_jacobian(
function radial_and_tangential_undistort (line 342) | def radial_and_tangential_undistort(
function rotation_matrix (line 382) | def rotation_matrix(a: TensorType[3], b: TensorType[3]) -> TensorType[3,...
function auto_orient_and_center_poses (line 410) | def auto_orient_and_center_poses(
FILE: AutoReconForDens3R/nerfstudio/cameras/cameras.py
class CameraType (line 36) | class CameraType(Enum):
class Cameras (line 56) | class Cameras(TensorDataclass):
method __init__ (line 89) | def __init__(
method _init_get_fc_xy (line 145) | def _init_get_fc_xy(self, fc_xy, name):
method _init_get_camera_type (line 166) | def _init_get_camera_type(
method _init_get_height_width (line 208) | def _init_get_height_width(
method _init_get_times (line 240) | def _init_get_times(self, times):
method device (line 252) | def device(self):
method image_height (line 257) | def image_height(self) -> TensorType["num_cameras":..., 1]:
method image_width (line 262) | def image_width(self) -> TensorType["num_cameras":..., 1]:
method is_jagged (line 267) | def is_jagged(self):
method get_image_coords (line 276) | def get_image_coords(
method generate_rays (line 304) | def generate_rays( # pylint: disable=too-many-statements
method _generate_rays_from_coords (line 459) | def _generate_rays_from_coords(
method to_json (line 698) | def to_json(
method get_intrinsics_matrices (line 733) | def get_intrinsics_matrices(self) -> TensorType["num_cameras":..., 3, 3]:
method rescale_output_resolution (line 747) | def rescale_output_resolution(
FILE: AutoReconForDens3R/nerfstudio/cameras/lie_groups.py
function exp_map_SO3xR3 (line 23) | def exp_map_SO3xR3(tangent_vector: TensorType["b", 6]) -> TensorType["b"...
function exp_map_SE3 (line 61) | def exp_map_SE3(tangent_vector: TensorType["b", 6]) -> TensorType["b", 3...
FILE: AutoReconForDens3R/nerfstudio/cameras/rays.py
class Frustums (line 39) | class Frustums(TensorDataclass):
method get_positions (line 55) | def get_positions(self) -> TensorType[..., 3]:
method set_offsets (line 66) | def set_offsets(self, offsets):
method get_start_positions (line 70) | def get_start_positions(self) -> TensorType[..., 3]:
method get_gaussian_blob (line 84) | def get_gaussian_blob(self) -> Gaussians:
method get_mock_frustum (line 103) | def get_mock_frustum(cls, device="cpu") -> "Frustums":
class RaySamples (line 119) | class RaySamples(TensorDataclass):
method __post_init__ (line 140) | def __post_init__(self) -> None:
method get_weights (line 153) | def get_weights(self, densities: TensorType[..., "num_samples", 1]) ->...
method get_weights_and_transmittance (line 179) | def get_weights_and_transmittance(
method get_weights_from_alphas (line 205) | def get_weights_from_alphas(self, alphas: TensorType[..., "num_samples...
method get_weights_and_transmittance_from_alphas (line 224) | def get_weights_and_transmittance_from_alphas(
class RayBundle (line 250) | class RayBundle(TensorDataclass):
method set_camera_indices (line 274) | def set_camera_indices(self, camera_index: int) -> None:
method __len__ (line 282) | def __len__(self):
method sample (line 286) | def sample(self, num_rays: int) -> "RayBundle":
method get_row_major_sliced_ray_bundle (line 299) | def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int...
method get_ray_samples (line 312) | def get_ray_samples(
FILE: AutoReconForDens3R/nerfstudio/configs/base_config.py
class PrintableConfig (line 44) | class PrintableConfig: # pylint: disable=too-few-public-methods
method __str__ (line 47) | def __str__(self):
class InstantiateConfig (line 62) | class InstantiateConfig(PrintableConfig): # pylint: disable=too-few-pub...
method setup (line 67) | def setup(self, **kwargs) -> Any:
class MachineConfig (line 74) | class MachineConfig(PrintableConfig):
class LocalWriterConfig (line 90) | class LocalWriterConfig(InstantiateConfig):
method setup (line 108) | def setup(self, banner_messages: Optional[List[str]] = None, **kwargs)...
class LoggingConfig (line 118) | class LoggingConfig(PrintableConfig):
class TrainerConfig (line 137) | class TrainerConfig(PrintableConfig):
class ViewerConfig (line 173) | class ViewerConfig(PrintableConfig):
class Config (line 207) | class Config(PrintableConfig):
method set_global (line 242) | def set_global(self) -> None:
method is_viewer_enabled (line 253) | def is_viewer_enabled(self) -> bool:
method is_wandb_enabled (line 257) | def is_wandb_enabled(self) -> bool:
method is_tensorboard_enabled (line 261) | def is_tensorboard_enabled(self) -> bool:
method set_timestamp (line 265) | def set_timestamp(self) -> None:
method set_experiment_name (line 270) | def set_experiment_name(self) -> None:
method get_base_dir (line 275) | def get_base_dir(self) -> Path:
method get_checkpoint_dir (line 282) | def get_checkpoint_dir(self) -> Path:
method print_to_terminal (line 286) | def print_to_terminal(self) -> None:
method save_config (line 292) | def save_config(self) -> None:
FILE: AutoReconForDens3R/nerfstudio/configs/config_utils.py
function to_immutable_dict (line 30) | def to_immutable_dict(d: Dict[str, Any]):
function convert_markup_to_ansi (line 39) | def convert_markup_to_ansi(markup_string: str) -> str:
FILE: AutoReconForDens3R/nerfstudio/engine/callbacks.py
class TrainingCallbackAttributes (line 28) | class TrainingCallbackAttributes:
class TrainingCallbackLocation (line 45) | class TrainingCallbackLocation(Enum):
class TrainingCallback (line 52) | class TrainingCallback:
method __init__ (line 66) | def __init__(
method run_callback (line 85) | def run_callback(self, step: int):
method run_callback_at_location (line 98) | def run_callback_at_location(self, step: int, location: TrainingCallba...
FILE: AutoReconForDens3R/nerfstudio/engine/optimizers.py
class OptimizerConfig (line 36) | class OptimizerConfig(base_config.PrintableConfig):
method setup (line 50) | def setup(self, params) -> Any:
class AdamOptimizerConfig (line 59) | class AdamOptimizerConfig(OptimizerConfig):
class RAdamOptimizerConfig (line 67) | class RAdamOptimizerConfig(OptimizerConfig):
class AdamWOptimizerConfig (line 74) | class AdamWOptimizerConfig(OptimizerConfig):
function setup_optimizers (line 81) | def setup_optimizers(config: base_config.Config, param_groups: Dict[str,...
class Optimizers (line 104) | class Optimizers:
method __init__ (line 112) | def __init__(self, config: Dict[str, Any], param_groups: Dict[str, Lis...
method optimizer_step (line 129) | def optimizer_step(self, param_group_name: str) -> None:
method scheduler_step (line 137) | def scheduler_step(self, param_group_name: str) -> None:
method zero_grad_all (line 146) | def zero_grad_all(self) -> None:
method optimizer_scaler_step_all (line 151) | def optimizer_scaler_step_all(self, grad_scaler: GradScaler) -> None:
method optimizer_step_all (line 171) | def optimizer_step_all(self):
method scheduler_step_all (line 180) | def scheduler_step_all(self, step: int) -> None:
method load_optimizers (line 192) | def load_optimizers(self, loaded_state: Dict[str, Any]) -> None:
method load_schedulers (line 201) | def load_schedulers(self, loaded_state: Dict[str, Any]) -> None:
FILE: AutoReconForDens3R/nerfstudio/engine/schedulers.py
class SchedulerConfig (line 27) | class SchedulerConfig(InstantiateConfig):
method setup (line 36) | def setup(self, optimizer=None, lr_init=None, **kwargs) -> Any:
class ExponentialDecaySchedule (line 41) | class ExponentialDecaySchedule(lr_scheduler.LambdaLR):
method __init__ (line 58) | def __init__(self, optimizer, lr_init, lr_final, max_steps, lr_delay_s...
class DelayerScheduler (line 76) | class DelayerScheduler(lr_scheduler.LambdaLR):
method __init__ (line 79) | def __init__(
class DelayedExponentialScheduler (line 99) | class DelayedExponentialScheduler(DelayerScheduler):
method __init__ (line 102) | def __init__(
class MultiStepSchedulerConfig (line 120) | class MultiStepSchedulerConfig(InstantiateConfig):
method setup (line 126) | def setup(self, optimizer=None, lr_init=None, **kwargs) -> Any:
class ExponentialSchedulerConfig (line 136) | class ExponentialSchedulerConfig(InstantiateConfig):
method setup (line 143) | def setup(self, optimizer=None, lr_init=None, **kwargs) -> Any:
class NeuSSchedulerConfig (line 152) | class NeuSSchedulerConfig(InstantiateConfig):
method setup (line 160) | def setup(self, optimizer=None, lr_init=None, **kwargs) -> Any:
class NeuSScheduler (line 170) | class NeuSScheduler(lr_scheduler.LambdaLR):
method __init__ (line 173) | def __init__(self, optimizer, warm_up_end, learning_rate_alpha, max_st...
class AutoReconSchedulerConfig (line 187) | class AutoReconSchedulerConfig(InstantiateConfig):
method setup (line 195) | def setup(self, optimizer=None, lr_init=None, **kwargs) -> Any:
class AutoReconScheduler (line 203) | class AutoReconScheduler(lr_scheduler.LambdaLR):
method __init__ (line 206) | def __init__(self, optimizer, warm_up_end) -> None:
FILE: AutoReconForDens3R/nerfstudio/engine/trainer.py
class Trainer (line 52) | class Trainer:
method __init__ (line 74) | def __init__(self, config: cfg.Config, local_rank: int = 0, world_size...
method setup (line 106) | def setup(self, test_mode: Literal["test", "val", "inference"] = "val"):
method train (line 131) | def train(self) -> None:
method _always_render (line 189) | def _always_render(self, step):
method _check_viewer_warnings (line 196) | def _check_viewer_warnings(self) -> None:
method _init_viewer_state (line 206) | def _init_viewer_state(self) -> None:
method _update_viewer_state (line 217) | def _update_viewer_state(self, step: int):
method _update_viewer_rays_per_sec (line 237) | def _update_viewer_rays_per_sec(self, train_t: TimeWriter, vis_t: Time...
method _load_checkpoint (line 253) | def _load_checkpoint(self) -> None:
method save_checkpoint (line 286) | def save_checkpoint(self, step: int) -> None:
method train_iteration (line 317) | def train_iteration(self, step: int) -> Tuple[torch.Tensor, Dict[str, ...
method eval_iteration (line 338) | def eval_iteration(self, step):
FILE: AutoReconForDens3R/nerfstudio/exporter/exporter_utils.py
class Mesh (line 50) | class Mesh:
function get_mesh_from_pymeshlab_mesh (line 65) | def get_mesh_from_pymeshlab_mesh(mesh: pymeshlab.Mesh) -> Mesh:
function get_mesh_from_filename (line 78) | def get_mesh_from_filename(filename: str, target_num_faces: Optional[int...
function generate_point_cloud (line 89) | def generate_point_cloud(
function render_trajectory (line 211) | def render_trajectory(
FILE: AutoReconForDens3R/nerfstudio/exporter/mesh_culling_utils.py
class MeshCullerConfig (line 28) | class MeshCullerConfig(InstantiateConfig):
class MeshCuller (line 50) | class MeshCuller:
method __init__ (line 53) | def __init__(self, config: MeshCullerConfig):
method main (line 58) | def main(self, mesh: Mesh, pipeline: Pipeline, output_path: Path):
method _get_ray_sampler (line 108) | def _get_ray_sampler(self):
method _build_ray_bundle (line 125) | def _build_ray_bundle(self, vertices, faces, face_normals) -> RayBundle:
method _get_ray_length (line 142) | def _get_ray_length(self, vertices, faces):
FILE: AutoReconForDens3R/nerfstudio/exporter/texture_utils.py
function get_parallelogram_area (line 43) | def get_parallelogram_area(
function get_texture_image (line 59) | def get_texture_image(num_pixels_w, num_pixels_h, device):
function unwrap_mesh_per_uv_triangle (line 78) | def unwrap_mesh_per_uv_triangle(
function unwrap_mesh_with_xatlas (line 210) | def unwrap_mesh_with_xatlas(
function export_textured_mesh (line 326) | def export_textured_mesh(
FILE: AutoReconForDens3R/nerfstudio/exporter/tsdf_utils.py
class TSDF (line 42) | class TSDF:
method to (line 62) | def to(self, device: str):
method device (line 77) | def device(self):
method truncation (line 82) | def truncation(self):
method from_aabb (line 89) | def from_aabb(aabb: TensorType[2, 3], volume_dims: TensorType[3]):
method get_mesh (line 116) | def get_mesh(self) -> Mesh:
method export_mesh (line 139) | def export_mesh(cls, mesh: Mesh, filename: str):
method integrate_tsdf (line 168) | def integrate_tsdf(
function export_tsdf_mesh (line 273) | def export_tsdf_mesh(
FILE: AutoReconForDens3R/nerfstudio/field_components/activations.py
class _TruncExp (line 24) | class _TruncExp(Function): # pylint: disable=abstract-method
method forward (line 29) | def forward(ctx, x): # pylint: disable=arguments-differ
method backward (line 35) | def backward(ctx, g): # pylint: disable=arguments-differ
FILE: AutoReconForDens3R/nerfstudio/field_components/base_field_component.py
class FieldComponent (line 25) | class FieldComponent(nn.Module):
method __init__ (line 33) | def __init__(self, in_dim: Optional[int] = None, out_dim: Optional[int...
method build_nn_modules (line 38) | def build_nn_modules(self) -> None:
method set_in_dim (line 42) | def set_in_dim(self, in_dim: int) -> None:
method get_out_dim (line 52) | def get_out_dim(self) -> int:
method forward (line 59) | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> Ten...
FILE: AutoReconForDens3R/nerfstudio/field_components/embedding.py
class Embedding (line 26) | class Embedding(FieldComponent):
method __init__ (line 35) | def __init__(self, in_dim: int, out_dim: int) -> None:
method build_nn_modules (line 41) | def build_nn_modules(self) -> None:
method mean (line 44) | def mean(self, dim=0):
method forward (line 48) | def forward(self, in_tensor: TensorType[..., "input_dim"]) -> TensorTy...
FILE: AutoReconForDens3R/nerfstudio/field_components/encodings.py
class Encoding (line 41) | class Encoding(FieldComponent):
method __init__ (line 48) | def __init__(self, in_dim: int) -> None:
method forward (line 54) | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> Ten...
class Identity (line 63) | class Identity(Encoding):
method get_out_dim (line 66) | def get_out_dim(self) -> int:
method forward (line 71) | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> Ten...
class ScalingAndOffset (line 75) | class ScalingAndOffset(Encoding):
method __init__ (line 84) | def __init__(self, in_dim: int, scaling: float = 1.0, offset: float = ...
method get_out_dim (line 90) | def get_out_dim(self) -> int:
method forward (line 95) | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> Ten...
class NeRFEncoding (line 99) | class NeRFEncoding(Encoding):
method __init__ (line 111) | def __init__(
method get_out_dim (line 121) | def get_out_dim(self) -> int:
method forward (line 129) | def forward(
class RFFEncoding (line 162) | class RFFEncoding(Encoding):
method __init__ (line 172) | def __init__(self, in_dim: int, num_frequencies: int, scale: float, in...
method get_out_dim (line 185) | def get_out_dim(self) -> int:
method forward (line 188) | def forward(
class HashEncoding (line 220) | class HashEncoding(Encoding):
method __init__ (line 234) | def __init__(
method get_out_dim (line 286) | def get_out_dim(self) -> int:
method hash_fn (line 289) | def hash_fn(self, in_tensor: TensorType["bs":..., "num_levels", 3]) ->...
method pytorch_fwd (line 308) | def pytorch_fwd(self, in_tensor: TensorType["bs":..., "input_dim"]) ->...
method forward (line 351) | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> Ten...
class TensorCPEncoding (line 357) | class TensorCPEncoding(Encoding):
method __init__ (line 366) | def __init__(self, resolution: int = 256, num_components: int = 24, in...
method get_out_dim (line 375) | def get_out_dim(self) -> int:
method forward (line 378) | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> Ten...
method upsample_grid (line 393) | def upsample_grid(self, resolution: int) -> None:
class TensorVMEncoding (line 407) | class TensorVMEncoding(Encoding):
method __init__ (line 419) | def __init__(
method get_out_dim (line 437) | def get_out_dim(self) -> int:
method index_fn (line 440) | def index_fn(self, x: torch.Tensor, y: torch.Tensor, width: int, heigh...
method grid_sample_2d (line 453) | def grid_sample_2d(self, feature, coord, type="plane"):
method forward (line 496) | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> Ten...
method upsample_grid (line 529) | def upsample_grid(self, resolution: int) -> None:
class SHEncoding (line 545) | class SHEncoding(Encoding):
method __init__ (line 552) | def __init__(self, levels: int = 4) -> None:
method get_out_dim (line 560) | def get_out_dim(self) -> int:
method forward (line 564) | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> Ten...
class PeriodicVolumeEncoding (line 568) | class PeriodicVolumeEncoding(Encoding):
method __init__ (line 581) | def __init__(
method get_out_dim (line 616) | def get_out_dim(self) -> int:
method hash_fn (line 619) | def hash_fn(self, in_tensor: TensorType["bs":..., "num_levels", 3]) ->...
method pytorch_fwd (line 640) | def pytorch_fwd(self, in_tensor: TensorType["bs":..., "input_dim"]) ->...
method forward (line 686) | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> Ten...
method get_total_variation_loss (line 689) | def get_total_variation_loss(self):
FILE: AutoReconForDens3R/nerfstudio/field_components/field_heads.py
class FieldHeadNames (line 28) | class FieldHeadNames(Enum):
class FieldHead (line 49) | class FieldHead(FieldComponent):
method __init__ (line 59) | def __init__(
method set_in_dim (line 76) | def set_in_dim(self, in_dim: int) -> None:
method _construct_net (line 81) | def _construct_net(self):
method forward (line 84) | def forward(self, in_tensor: TensorType["bs":..., "in_dim"]) -> Tensor...
class DensityFieldHead (line 101) | class DensityFieldHead(FieldHead):
method __init__ (line 109) | def __init__(self, in_dim: Optional[int] = None, activation: Optional[...
class RGBFieldHead (line 113) | class RGBFieldHead(FieldHead):
method __init__ (line 121) | def __init__(self, in_dim: Optional[int] = None, activation: Optional[...
class SHFieldHead (line 125) | class SHFieldHead(FieldHead):
method __init__ (line 135) | def __init__(
class UncertaintyFieldHead (line 143) | class UncertaintyFieldHead(FieldHead):
method __init__ (line 151) | def __init__(self, in_dim: Optional[int] = None, activation: Optional[...
class TransientRGBFieldHead (line 155) | class TransientRGBFieldHead(FieldHead):
method __init__ (line 163) | def __init__(self, in_dim: Optional[int] = None, activation: Optional[...
class TransientDensityFieldHead (line 167) | class TransientDensityFieldHead(FieldHead):
method __init__ (line 175) | def __init__(self, in_dim: Optional[int] = None, activation: Optional[...
class SemanticFieldHead (line 181) | class SemanticFieldHead(FieldHead):
method __init__ (line 190) | def __init__(self, num_classes: int, in_dim: Optional[int] = None) -> ...
class PredNormalsFieldHead (line 194) | class PredNormalsFieldHead(FieldHead):
method __init__ (line 202) | def __init__(self, in_dim: Optional[int] = None, activation: Optional[...
method forward (line 205) | def forward(self, in_tensor: TensorType["bs":..., "in_dim"]) -> Tensor...
FILE: AutoReconForDens3R/nerfstudio/field_components/mlp.py
class MLP (line 27) | class MLP(FieldComponent):
method __init__ (line 39) | def __init__(
method build_nn_modules (line 63) | def build_nn_modules(self) -> None:
method forward (line 80) | def forward(self, in_tensor: TensorType["bs":..., "in_dim"]) -> Tensor...
FILE: AutoReconForDens3R/nerfstudio/field_components/progressive_encoding.py
class ProgressiveEncoding (line 11) | class ProgressiveEncoding(nn.Module):
method __init__ (line 14) | def __init__(
method _get_init_weights (line 41) | def _get_init_weights(self):
method forward (line 46) | def forward(self, positions):
FILE: AutoReconForDens3R/nerfstudio/field_components/spatial_distortions.py
class SpatialDistortion (line 27) | class SpatialDistortion(nn.Module):
method forward (line 30) | def forward(
class SceneContraction (line 42) | class SceneContraction(SpatialDistortion):
method __init__ (line 63) | def __init__(
method forward (line 72) | def forward(self, positions):
class ForegroundAwareSceneContraction (line 107) | class ForegroundAwareSceneContraction(SpatialDistortion):
method __init__ (line 116) | def __init__(
method forward (line 134) | def forward(self, positions):
FILE: AutoReconForDens3R/nerfstudio/field_components/temporal_distortions.py
class TemporalDistortion (line 28) | class TemporalDistortion(nn.Module):
method forward (line 31) | def forward(self, positions: TensorType["bs":..., 3], times: Optional[...
class TemporalDistortionKind (line 42) | class TemporalDistortionKind(Enum):
method to_temporal_distortion (line 47) | def to_temporal_distortion(self, config: Dict[str, Any]) -> TemporalDi...
class DNeRFDistortion (line 54) | class DNeRFDistortion(TemporalDistortion):
method __init__ (line 64) | def __init__(
method forward (line 87) | def forward(self, positions, times=None):
FILE: AutoReconForDens3R/nerfstudio/fields/base_field.py
class FieldConfig (line 33) | class FieldConfig(InstantiateConfig):
class Field (line 40) | class Field(nn.Module):
method __init__ (line 43) | def __init__(self) -> None:
method density_fn (line 48) | def density_fn(self, positions: TensorType["bs":..., 3]) -> TensorType...
method get_density (line 68) | def get_density(self, ray_samples: RaySamples) -> Tuple[TensorType[......
method get_normals (line 75) | def get_normals(self) -> TensorType[..., 3]:
method get_outputs (line 94) | def get_outputs(
method forward (line 104) | def forward(self, ray_samples: RaySamples, compute_normals: bool = Fal...
FILE: AutoReconForDens3R/nerfstudio/fields/density_fields.py
class HashMLPDensityField (line 41) | class HashMLPDensityField(Field):
method __init__ (line 54) | def __init__(
method _get_normalized_positions (line 104) | def _get_normalized_positions(self, positions):
method get_density (line 112) | def get_density(self, ray_samples: RaySamples):
method get_outputs (line 134) | def get_outputs(self, ray_samples: RaySamples, density_embedding: Opti...
FILE: AutoReconForDens3R/nerfstudio/fields/feature_field.py
class FeatureFieldConfig (line 33) | class FeatureFieldConfig(FieldConfig):
class FeatureField (line 56) | class FeatureField(Field):
method __init__ (line 61) | def __init__(self, config: FeatureFieldConfig, spatial_distortion: Opt...
method build_ngp (line 81) | def build_ngp(self):
method build (line 142) | def build(self):
method query_features (line 145) | def query_features(
method get_outputs (line 158) | def get_outputs(self, ray_samples: RaySamples) -> Dict[str, torch.Tens...
method forward (line 173) | def forward(self, ray_samples: RaySamples):
method _get_input_positions (line 178) | def _get_input_positions(self, positions: torch.Tensor):
class FeatureSegFieldConfig (line 191) | class FeatureSegFieldConfig(FieldConfig):
class FeatureSegField (line 208) | class FeatureSegField(Field):
method __init__ (line 213) | def __init__(self, config: FeatureFieldConfig, ptcd_data: BasicPointCl...
method compute_seg_prob (line 233) | def compute_seg_prob(
method search_knn_fg (line 277) | def search_knn_fg(self, feats: TensorType["n", "c"]):
method search_knn_bg (line 280) | def search_knn_bg(self, feats: TensorType["n", "c"]):
method search_knn_plane (line 283) | def search_knn_plane(self, feats: TensorType["n", "c"]):
method _search_knn (line 286) | def _search_knn(
method compute_cos_sim (line 299) | def compute_cos_sim(
method _register_all_tensors_as_buffers (line 317) | def _register_all_tensors_as_buffers(self):
method forward (line 329) | def forward(
FILE: AutoReconForDens3R/nerfstudio/fields/instant_ngp_field.py
function get_normalized_directions (line 41) | def get_normalized_directions(directions: TensorType["bs":..., 3]):
class TCNNInstantNGPField (line 50) | class TCNNInstantNGPField(Field):
method __init__ (line 66) | def __init__(
method get_density (line 137) | def get_density(self, ray_samples: RaySamples):
method get_outputs (line 151) | def get_outputs(self, ray_samples: RaySamples, density_embedding: Opti...
method get_opacity (line 177) | def get_opacity(self, positions: TensorType["bs":..., 3], step_size) -...
FILE: AutoReconForDens3R/nerfstudio/fields/nerfacto_field.py
function get_normalized_directions (line 59) | def get_normalized_directions(directions: TensorType["bs":..., 3]):
class TCNNNerfactoField (line 68) | class TCNNNerfactoField(Field):
method __init__ (line 88) | def __init__(
method get_density (line 229) | def get_density(self, ray_samples: RaySamples):
method get_outputs (line 251) | def get_outputs(self, ray_samples: RaySamples, density_embedding: Opti...
method _get_appeanreance_embedding (line 313) | def _get_appeanreance_embedding(self, camera_indices: TensorType, dire...
method _get_normalized_positions (line 335) | def _get_normalized_positions(self, positions):
class TorchNerfactoField (line 346) | class TorchNerfactoField(Field):
method __init__ (line 351) | def __init__(
method get_density (line 397) | def get_density(self, ray_samples: RaySamples):
method get_outputs (line 408) | def get_outputs(
FILE: AutoReconForDens3R/nerfstudio/fields/nerfw_field.py
class VanillaNerfWField (line 38) | class VanillaNerfWField(Field):
method __init__ (line 54) | def __init__(
method get_density (line 112) | def get_density(self, ray_samples: RaySamples):
method get_outputs (line 120) | def get_outputs(
FILE: AutoReconForDens3R/nerfstudio/fields/sdf_field.py
class LaplaceDensity (line 54) | class LaplaceDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).c...
method __init__ (line 57) | def __init__(self, init_val, beta_min=0.0001):
method forward (line 62) | def forward(
method get_beta (line 73) | def get_beta(self):
class SigmoidDensity (line 79) | class SigmoidDensity(nn.Module): # alpha * Laplace(loc=0, scale=beta).c...
method __init__ (line 82) | def __init__(self, init_val, beta_min=0.0001):
method forward (line 87) | def forward(
method get_beta (line 100) | def get_beta(self):
class SingleVarianceNetwork (line 106) | class SingleVarianceNetwork(nn.Module):
method __init__ (line 116) | def __init__(self, init_val):
method forward (line 120) | def forward(self, x):
method get_variance (line 124) | def get_variance(self):
class SDFFieldConfig (line 130) | class SDFFieldConfig(FieldConfig):
class SDFField (line 202) | class SDFField(Field):
method __init__ (line 211) | def __init__(
method _build_geo_encodings (line 249) | def _build_geo_encodings(self) -> None:
method _build_geo_networks (line 307) | def _build_geo_networks(self):
method _build_color_encodings (line 348) | def _build_color_encodings(self, use_average_appearance_embedding):
method _build_color_networks (line 358) | def _build_color_networks(self):
method set_cos_anneal_ratio (line 383) | def set_cos_anneal_ratio(self, anneal: float) -> None:
method forward_geonetwork (line 387) | def forward_geonetwork(self, inputs):
method get_sdf (line 414) | def get_sdf(self, ray_samples: RaySamples):
method get_gradient (line 422) | def get_gradient(self, ray_samples: RaySamples):
method gradient (line 427) | def gradient(self, x):
method get_density (line 437) | def get_density(self, ray_samples: RaySamples):
method get_alpha (line 446) | def get_alpha(self, ray_samples: RaySamples, sdf=None, gradients=None):
method get_occupancy (line 498) | def get_occupancy(self, sdf):
method get_colors (line 503) | def get_colors(self, points, directions, normals, geo_features, camera...
method get_outputs (line 531) | def get_outputs(self, ray_samples: RaySamples, return_alphas=False, re...
method forward (line 594) | def forward(self, ray_samples: RaySamples, return_alphas=False, return...
method _get_appeanreance_embedding (line 605) | def _get_appeanreance_embedding(self, camera_indices: TensorType, dire...
method _get_direction_encoding (line 628) | def _get_direction_encoding(self):
method _get_input_dims (line 639) | def _get_input_dims(self):
method _get_geometric_init_radius (line 648) | def _get_geometric_init_radius(self):
method _get_input_positions (line 661) | def _get_input_positions(self, inputs: torch.Tensor):
method _get_normalized_positions (line 671) | def _get_normalized_positions(self, positions):
method _compute_curvature_loss (line 681) | def _compute_curvature_loss(self, inputs: TensorType["bs", 3], normals...
FILE: AutoReconForDens3R/nerfstudio/fields/semantic_nerf_field.py
class SemanticNerfField (line 36) | class SemanticNerfField(Field):
method __init__ (line 50) | def __init__(
method get_density (line 91) | def get_density(self, ray_samples: RaySamples):
method get_outputs (line 97) | def get_outputs(
FILE: AutoReconForDens3R/nerfstudio/fields/tensorf_field.py
class TensoRFField (line 33) | class TensoRFField(Field):
method __init__ (line 36) | def __init__(
method get_density (line 86) | def get_density(self, ray_samples: RaySamples):
method get_outputs (line 95) | def get_outputs(self, ray_samples: RaySamples, density_embedding: Opti...
method forward (line 115) | def forward(
FILE: AutoReconForDens3R/nerfstudio/fields/vanilla_nerf_field.py
class NeRFField (line 38) | class NeRFField(Field):
method __init__ (line 54) | def __init__(
method get_density (line 97) | def get_density(self, ray_samples: RaySamples):
method get_outputs (line 113) | def get_outputs(
method _get_normalized_positions (line 123) | def _get_normalized_positions(self, positions):
FILE: AutoReconForDens3R/nerfstudio/model_components/losses.py
function outer (line 35) | def outer(
function lossfun_outer (line 67) | def lossfun_outer(
function ray_samples_to_sdist (line 87) | def ray_samples_to_sdist(ray_samples):
function interlevel_loss (line 95) | def interlevel_loss(weights_list, ray_samples_list):
function lossfun_distortion (line 113) | def lossfun_distortion(t, w):
function distortion_loss (line 127) | def distortion_loss(weights_list, ray_samples_list):
function nerfstudio_distortion_loss (line 135) | def nerfstudio_distortion_loss(
function orientation_loss (line 177) | def orientation_loss(
function pred_normal_loss (line 192) | def pred_normal_loss(
function monosdf_normal_loss (line 201) | def monosdf_normal_loss(normal_pred: torch.Tensor, normal_gt: torch.Tens...
function compute_scale_and_shift (line 216) | def compute_scale_and_shift(prediction, target, mask):
function reduction_batch_based (line 239) | def reduction_batch_based(image_loss, M):
function reduction_image_based (line 251) | def reduction_image_based(image_loss, M):
function mse_loss (line 262) | def mse_loss(prediction, target, mask, reduction=reduction_batch_based):
function gradient_loss (line 271) | def gradient_loss(prediction, target, mask, reduction=reduction_batch_ba...
class MiDaSMSELoss (line 291) | class MiDaSMSELoss(nn.Module):
method __init__ (line 292) | def __init__(self, reduction="batch-based"):
method forward (line 300) | def forward(self, prediction, target, mask):
class GradientLoss (line 304) | class GradientLoss(nn.Module):
method __init__ (line 305) | def __init__(self, scales=4, reduction="batch-based"):
method forward (line 315) | def forward(self, prediction, target, mask):
class ScaleAndShiftInvariantLoss (line 331) | class ScaleAndShiftInvariantLoss(nn.Module):
method __init__ (line 332) | def __init__(self, alpha=0.5, scales=4, reduction="batch-based"):
method forward (line 341) | def forward(self, prediction, target, mask):
method __get_prediction_ssi (line 352) | def __get_prediction_ssi(self):
class SSIM (line 362) | class SSIM(nn.Module):
method __init__ (line 365) | def __init__(self, patch_size):
method forward (line 378) | def forward(self, x, y):
class NCC (line 396) | class NCC(nn.Module):
method __init__ (line 399) | def __init__(self, patch_size: int = 11, min_patch_variance: float = 0...
method forward (line 404) | def forward(self, x, y):
class MultiViewLoss (line 432) | class MultiViewLoss(nn.Module):
method __init__ (line 435) | def __init__(self, patch_size: int = 11, topk: int = 4, min_patch_vari...
method forward (line 447) | def forward(self, patches: torch.Tensor, valid: torch.Tensor):
class SensorDepthLoss (line 569) | class SensorDepthLoss(nn.Module):
method __init__ (line 572) | def __init__(self, truncation: float):
method forward (line 576) | def forward(self, batch, outputs):
FILE: AutoReconForDens3R/nerfstudio/model_components/patch_warping.py
function get_intersection_points (line 27) | def get_intersection_points(
function get_homography (line 93) | def get_homography(
class PatchWarping (line 140) | class PatchWarping(nn.Module):
method __init__ (line 143) | def __init__(self, patch_size: int = 31, pixel_offset: float = 0.5, va...
method forward (line 158) | def forward(
FILE: AutoReconForDens3R/nerfstudio/model_components/ray_generators.py
class RayGenerator (line 26) | class RayGenerator(nn.Module):
method __init__ (line 35) | def __init__(self, cameras: Cameras, pose_optimizer: CameraOptimizer) ...
method forward (line 41) | def forward(self, ray_indices: TensorType["num_rays", 3]) -> RayBundle:
FILE: AutoReconForDens3R/nerfstudio/model_components/ray_samplers.py
class Sampler (line 41) | class Sampler(nn.Module):
method __init__ (line 48) | def __init__(
method generate_ray_samples (line 56) | def generate_ray_samples(self) -> RaySamples:
method forward (line 59) | def forward(self, *args, **kwargs) -> RaySamples:
class SpacedSampler (line 64) | class SpacedSampler(Sampler):
method __init__ (line 75) | def __init__(
method generate_ray_samples (line 89) | def generate_ray_samples(
class UniformSampler (line 145) | class UniformSampler(SpacedSampler):
method __init__ (line 154) | def __init__(
class LinearDisparitySampler (line 169) | class LinearDisparitySampler(SpacedSampler):
method __init__ (line 178) | def __init__(
class SqrtSampler (line 193) | class SqrtSampler(SpacedSampler):
method __init__ (line 201) | def __init__(
class LogSampler (line 216) | class LogSampler(SpacedSampler):
method __init__ (line 224) | def __init__(
class UniformLinDispPiecewiseSampler (line 239) | class UniformLinDispPiecewiseSampler(SpacedSampler):
method __init__ (line 250) | def __init__(
class PDFSampler (line 265) | class PDFSampler(Sampler):
method __init__ (line 276) | def __init__(
method generate_ray_samples (line 290) | def generate_ray_samples(
class VolumetricSampler (line 397) | class VolumetricSampler(Sampler):
method __init__ (line 408) | def __init__(
method get_sigma_fn (line 423) | def get_sigma_fn(self, origins, directions) -> Optional[Callable]:
method generate_ray_samples (line 446) | def generate_ray_samples(self) -> RaySamples:
method forward (line 452) | def forward(
class ProposalNetworkSampler (line 525) | class ProposalNetworkSampler(Sampler):
method __init__ (line 528) | def __init__(
method set_anneal (line 556) | def set_anneal(self, anneal: float) -> None:
method step_cb (line 560) | def step_cb(self, step):
method generate_ray_samples (line 565) | def generate_ray_samples(
class ErrorBoundedSampler (line 609) | class ErrorBoundedSampler(Sampler):
method __init__ (line 612) | def __init__(
method generate_ray_samples (line 641) | def generate_ray_samples(
method get_dstar (line 732) | def get_dstar(self, sdf, ray_samples: RaySamples):
method get_updated_beta (line 756) | def get_updated_beta(self, beta0, beta, density_fn, sdf, d_star, ray_s...
method get_error_bound (line 768) | def get_error_bound(self, beta, density_fn, sdf, d_star, ray_samples):
method merge_ray_samples (line 786) | def merge_ray_samples(self, ray_bundle: RayBundle, ray_samples_1: RayS...
function save_points (line 825) | def save_points(path_save, pts, colors=None, normals=None, BRG2RGB=False):
class NeuSSampler (line 849) | class NeuSSampler(Sampler):
method __init__ (line 852) | def __init__(
method generate_ray_samples (line 881) | def generate_ray_samples(
method rendering_sdf_with_fixed_inv_s (line 943) | def rendering_sdf_with_fixed_inv_s(self, ray_samples: RaySamples, sdf:...
class UniSurfSampler (line 981) | class UniSurfSampler(Sampler):
method __init__ (line 984) | def __init__(
method step_cb (line 1022) | def step_cb(self, step):
method generate_ray_samples (line 1027) | def generate_ray_samples(
method merge_ray_samples_in_eculidean (line 1129) | def merge_ray_samples_in_eculidean(
method secant_method (line 1166) | def secant_method(
class NeuralReconWSampler (line 1175) | class NeuralReconWSampler(Sampler):
method __init__ (line 1178) | def __init__(
method init_grid_coordinate (line 1230) | def init_grid_coordinate(self):
method update_binary_grid (line 1245) | def update_binary_grid(self, step, sdf_fn=None):
method generate_ray_samples (line 1283) | def generate_ray_samples(
class NeuSAccSampler (line 1349) | class NeuSAccSampler(Sampler):
method __init__ (line 1352) | def __init__(
method init_grid_coordinate (line 1395) | def init_grid_coordinate(self):
method update_step_size (line 1412) | def update_step_size(self, step, inv_s=None):
method update_binary_grid (line 1418) | def update_binary_grid(self, step, sdf_fn=None, inv_s=None):
method create_ray_samples_from_ray_indices (line 1469) | def create_ray_samples_from_ray_indices(self, ray_bundle: RayBundle, r...
method generate_ray_samples (line 1491) | def generate_ray_samples(
class RayTracingSampler (line 1540) | class RayTracingSampler(Sampler):
method __init__ (line 1555) | def __init__(
method generate_ray_samples (line 1591) | def generate_ray_samples(
method adjust_aabb_intersection (line 1619) | def adjust_aabb_intersection(self, ray_bundle: RayBundle) -> RayBundle:
method sphere_tracing (line 1653) | def sphere_tracing(self, ray_bundle: RayBundle, sdf_fn: Callable):
method _sphere_tracing_idr (line 1715) | def _sphere_tracing_idr(self, ray_bundle: RayBundle, sdf_fn: Callable):
class DummySampler (line 1797) | class DummySampler(Sampler):
method __init__ (line 1799) | def __init__(self):
method generate_ray_samples (line 1802) | def generate_ray_samples(self, ray_bundle: RayBundle) -> RaySamples:
FILE: AutoReconForDens3R/nerfstudio/model_components/renderers.py
class RGBRenderer (line 46) | class RGBRenderer(nn.Module):
method __init__ (line 52) | def __init__(
method combine_rgb (line 60) | def combine_rgb(
method forward (line 102) | def forward(
class SHRenderer (line 133) | class SHRenderer(nn.Module):
method __init__ (line 140) | def __init__(
method forward (line 149) | def forward(
class AccumulationRenderer (line 182) | class AccumulationRenderer(nn.Module):
method forward (line 185) | def forward(
class DepthRenderer (line 210) | class DepthRenderer(nn.Module):
method __init__ (line 220) | def __init__(self, method: Literal["median", "expected"] = "median") -...
method forward (line 224) | def forward(
class UncertaintyRenderer (line 275) | class UncertaintyRenderer(nn.Module):
method forward (line 278) | def forward(
class SemanticRenderer (line 295) | class SemanticRenderer(nn.Module):
method forward (line 298) | def forward(
class NormalsRenderer (line 308) | class NormalsRenderer(nn.Module):
method forward (line 311) | def forward(
class FeatureRenderer (line 321) | class FeatureRenderer(nn.Module):
method forward (line 324) | def forward(
FILE: AutoReconForDens3R/nerfstudio/model_components/scene_colliders.py
class SceneCollider (line 29) | class SceneCollider(nn.Module):
method __init__ (line 32) | def __init__(self, **kwargs) -> None:
method set_nears_and_fars (line 36) | def set_nears_and_fars(self, ray_bundle) -> RayBundle:
method forward (line 40) | def forward(self, ray_bundle: RayBundle) -> RayBundle:
class AABBBoxCollider (line 47) | class AABBBoxCollider(SceneCollider):
method __init__ (line 54) | def __init__(self, scene_box: SceneBox, near_plane: float = 0.0, **kwa...
method _intersect_with_aabb (line 59) | def _intersect_with_aabb(
method set_nears_and_fars (line 97) | def set_nears_and_fars(self, ray_bundle: RayBundle) -> RayBundle:
class NearFarCollider (line 111) | class NearFarCollider(SceneCollider):
method __init__ (line 119) | def __init__(self, near_plane: float, far_plane: float, **kwargs) -> N...
method set_nears_and_fars (line 124) | def set_nears_and_fars(self, ray_bundle: RayBundle) -> RayBundle:
class SphereCollider (line 132) | class SphereCollider(SceneCollider):
method __init__ (line 136) | def __init__(self, radius: float = 1., near_plane: float = 0.1, far_pl...
method forward (line 142) | def forward(self, ray_bundle: RayBundle) -> RayBundle:
class AABBBoxNearFarCollider (line 165) | class AABBBoxNearFarCollider(SceneCollider):
method __init__ (line 171) | def __init__(self, scene_box: SceneBox, near_plane: float = 0.1, far_p...
method _intersect_with_aabb (line 177) | def _intersect_with_aabb(
method set_nears_and_fars (line 211) | def set_nears_and_fars(self, ray_bundle: RayBundle) -> RayBundle:
FILE: AutoReconForDens3R/nerfstudio/models/base_model.py
class ModelConfig (line 43) | class ModelConfig(InstantiateConfig):
class Model (line 58) | class Model(nn.Module):
method __init__ (line 70) | def __init__(
method device (line 94) | def device(self):
method get_training_callbacks (line 98) | def get_training_callbacks( # pylint:disable=no-self-use
method populate_modules (line 104) | def populate_modules(self):
method get_param_groups (line 115) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method get_outputs (line 123) | def get_outputs(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:
method forward (line 134) | def forward(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:
method get_metrics_dict (line 147) | def get_metrics_dict(self, outputs, batch) -> Dict[str, torch.Tensor]:
method get_loss_dict (line 159) | def get_loss_dict(self, outputs, batch, metrics_dict=None, **kwargs) -...
method get_outputs_for_camera_ray_bundle (line 169) | def get_outputs_for_camera_ray_bundle(self, camera_ray_bundle: RayBund...
method get_image_metrics_and_images (line 200) | def get_image_metrics_and_images(
method load_model (line 216) | def load_model(self, loaded_state: Dict[str, Any]) -> None:
FILE: AutoReconForDens3R/nerfstudio/models/base_surface_model.py
function map_range_val (line 90) | def map_range_val(input_val, input_start, input_end, output_start, outpu...
class SurfaceModelConfig (line 104) | class SurfaceModelConfig(ModelConfig):
class SurfaceModel (line 211) | class SurfaceModel(Model):
method __init__ (line 220) | def __init__(self, *args, **kwargs):
method populate_modules (line 238) | def populate_modules(self):
method step_cb (line 279) | def step_cb(self, step):
method _build_scene_contraction (line 282) | def _build_scene_contraction(self):
method get_param_groups (line 298) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method get_training_callbacks (line 309) | def get_training_callbacks(
method _build_collider (line 351) | def _build_collider(self) -> None:
method _build_bg_model_and_sampler (line 370) | def _build_bg_model_and_sampler(self) -> None:
method _build_renderers (line 445) | def _build_renderers(self) -> None:
method sample_and_forward_field (line 457) | def sample_and_forward_field(self, ray_bundle: RayBundle) -> Dict:
method get_outputs (line 467) | def get_outputs(self, ray_bundle: RayBundle) -> Dict:
method forward (line 484) | def forward(self, ray_bundle: RayBundle) -> Dict[str, torch.Tensor]:
method fg_fields_query_and_render (line 503) | def fg_fields_query_and_render(self, ray_bundle: RayBundle) -> Dict[st...
method bg_fields_query_and_render (line 542) | def bg_fields_query_and_render(
method sample_and_forward_field_bg (line 580) | def sample_and_forward_field_bg(
method _sample_and_forward_field_bg_proposal (line 593) | def _sample_and_forward_field_bg_proposal(
method _update_outputs_for_loss (line 621) | def _update_outputs_for_loss(self, ray_bundle: RayBundle, outputs: Dic...
method _update_outptus_for_vis (line 643) | def _update_outptus_for_vis(
method _compute_prop_net_depths (line 653) | def _compute_prop_net_depths(
method _handle_no_intersect_rays_fg (line 668) | def _handle_no_intersect_rays_fg(self, ray_bundle: RayBundle, samples_...
method _handle_bottom_intersect_rays_bg (line 697) | def _handle_bottom_intersect_rays_bg(self, ray_bundle: RayBundle, samp...
method _reset_near_far_for_background (line 706) | def _reset_near_far_for_background(self, ray_bundle: RayBundle) -> Ray...
method get_outputs_flexible (line 721) | def get_outputs_flexible(self, ray_bundle: RayBundle, additional_input...
method get_loss_dict (line 754) | def get_loss_dict(self, outputs, batch, metrics_dict=None, step=None) ...
method get_metrics_dict (line 858) | def get_metrics_dict(self, outputs, batch) -> Dict:
method get_image_metrics_and_images (line 864) | def get_image_metrics_and_images(
method _get_no_intersection_ray_mask (line 939) | def _get_no_intersection_ray_mask(self, outputs: Dict[str, torch.Tenso...
method _compute_curvature_loss (line 944) | def _compute_curvature_loss(self, outputs: Dict[str, torch.Tensor], lo...
method _compute_interlevel_loss_bg (line 965) | def _compute_interlevel_loss_bg(self, outputs: Dict[str, torch.Tensor]...
method _get_interlevel_loss_inputs_bg (line 977) | def _get_interlevel_loss_inputs_bg(self, outputs: Dict[str, torch.Tens...
method _build_anneal_fn (line 992) | def _build_anneal_fn(self, anneal_fn_name, max_step):
method _compute_ptcd_reg_fg (line 1008) | def _compute_ptcd_reg_fg(self, outputs: Dict[str, torch.Tensor], loss_...
method _compute_ptcd_reg_plane (line 1023) | def _compute_ptcd_reg_plane(self, outputs: Dict[str, torch.Tensor], lo...
method _compute_mask_beta_prior (line 1042) | def _compute_mask_beta_prior(self, outputs: Dict[str, torch.Tensor], l...
FILE: AutoReconForDens3R/nerfstudio/models/distilled_neus_facto.py
class DistilledNeuSFactoModelConfig (line 32) | class DistilledNeuSFactoModelConfig(NeuSFactoModelConfig):
class DistilledNeuSFactoModel (line 49) | class DistilledNeuSFactoModel(NeuSFactoModel):
method populate_modules (line 53) | def populate_modules(self):
method sample_and_forward_field (line 77) | def sample_and_forward_field(self, ray_bundle: RayBundle):
method sample_and_forward_field_bg (line 100) | def sample_and_forward_field_bg(
method forward_field (line 127) | def forward_field(self, ray_samples):
method _forward_field_sdf (line 135) | def _forward_field_sdf(self, ray_samples):
method _forward_field_nerfacto (line 144) | def _forward_field_nerfacto(self, ray_samples):
method _mask_ray_samples_with_aabb (line 153) | def _mask_ray_samples_with_aabb(self, ray_bundle: RayBundle, ray_sampl...
method _compute_aabb_intersections (line 184) | def _compute_aabb_intersections(self, ray_bundle: RayBundle):
FILE: AutoReconForDens3R/nerfstudio/models/dto.py
class DtoOModelConfig (line 58) | class DtoOModelConfig(NerfactoModelConfig):
class DtoOModel (line 68) | class DtoOModel(NerfactoModel):
method populate_modules (line 77) | def populate_modules(self):
method get_training_callbacks (line 141) | def get_training_callbacks(
method get_param_groups (line 166) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method get_outputs (line 171) | def get_outputs(self, ray_bundle: RayBundle):
method get_metrics_dict (line 437) | def get_metrics_dict(self, outputs, batch):
method get_loss_dict (line 457) | def get_loss_dict(self, outputs, batch, metrics_dict=None):
method get_image_metrics_and_images (line 512) | def get_image_metrics_and_images(
FILE: AutoReconForDens3R/nerfstudio/models/instant_ngp.py
class InstantNGPModelConfig (line 52) | class InstantNGPModelConfig(ModelConfig):
class NGPModel (line 85) | class NGPModel(Model):
method __init__ (line 95) | def __init__(self, config: InstantNGPModelConfig, **kwargs) -> None:
method populate_modules (line 98) | def populate_modules(self):
method get_training_callbacks (line 140) | def get_training_callbacks(
method get_param_groups (line 159) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method get_outputs (line 166) | def get_outputs(self, ray_bundle: RayBundle):
method get_metrics_dict (line 211) | def get_metrics_dict(self, outputs, batch):
method get_loss_dict (line 218) | def get_loss_dict(self, outputs, batch, metrics_dict=None):
method get_image_metrics_and_images (line 225) | def get_image_metrics_and_images(
FILE: AutoReconForDens3R/nerfstudio/models/mipnerf.py
class MipNerfModel (line 43) | class MipNerfModel(Model):
method __init__ (line 50) | def __init__(
method populate_modules (line 58) | def populate_modules(self):
method get_param_groups (line 91) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method get_outputs (line 98) | def get_outputs(self, ray_bundle: RayBundle):
method get_loss_dict (line 139) | def get_loss_dict(self, outputs, batch, metrics_dict=None):
method get_image_metrics_and_images (line 147) | def get_image_metrics_and_images(
FILE: AutoReconForDens3R/nerfstudio/models/monosdf.py
class MonoSDFModelConfig (line 28) | class MonoSDFModelConfig(VolSDFModelConfig):
class MonoSDFModel (line 38) | class MonoSDFModel(VolSDFModel):
FILE: AutoReconForDens3R/nerfstudio/models/nerfacto.py
class NerfactoModelConfig (line 63) | class NerfactoModelConfig(ModelConfig):
class NerfactoModel (line 122) | class NerfactoModel(Model):
method populate_modules (line 131) | def populate_modules(self):
method get_param_groups (line 207) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method get_training_callbacks (line 213) | def get_training_callbacks(
method get_outputs (line 244) | def get_outputs(self, ray_bundle: RayBundle):
method get_metrics_dict (line 286) | def get_metrics_dict(self, outputs, batch):
method get_loss_dict (line 294) | def get_loss_dict(self, outputs, batch, metrics_dict=None):
method get_image_metrics_and_images (line 316) | def get_image_metrics_and_images(
FILE: AutoReconForDens3R/nerfstudio/models/neuralreconW.py
class NeuralReconWModelConfig (line 35) | class NeuralReconWModelConfig(NeuSModelConfig):
class NeuralReconWModel (line 41) | class NeuralReconWModel(NeuSModel):
method populate_modules (line 50) | def populate_modules(self):
method get_training_callbacks (line 61) | def get_training_callbacks(
FILE: AutoReconForDens3R/nerfstudio/models/neus.py
class NeuSModelConfig (line 37) | class NeuSModelConfig(SurfaceModelConfig):
class NeuSModel (line 55) | class NeuSModel(SurfaceModel):
method populate_modules (line 64) | def populate_modules(self):
method get_training_callbacks (line 78) | def get_training_callbacks(
method sample_and_forward_field (line 100) | def sample_and_forward_field(self, ray_bundle: RayBundle) -> Dict:
method get_metrics_dict (line 117) | def get_metrics_dict(self, outputs, batch) -> Dict:
FILE: AutoReconForDens3R/nerfstudio/models/neus_acc.py
class NeuSAccModelConfig (line 39) | class NeuSAccModelConfig(NeuSModelConfig):
class NeuSAccModel (line 47) | class NeuSAccModel(NeuSModel):
method populate_modules (line 56) | def populate_modules(self):
method get_training_callbacks (line 63) | def get_training_callbacks(
method get_outputs (line 91) | def get_outputs(self, ray_bundle: RayBundle):
method get_metrics_dict (line 140) | def get_metrics_dict(self, outputs, batch):
FILE: AutoReconForDens3R/nerfstudio/models/neus_facto.py
class NeuSFactoModelConfig (line 45) | class NeuSFactoModelConfig(NeuSModelConfig):
class NeuSFactoModel (line 93) | class NeuSFactoModel(NeuSModel):
method populate_modules (line 102) | def populate_modules(self):
method get_param_groups (line 155) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method get_training_callbacks (line 160) | def get_training_callbacks(
method sample_and_forward_field (line 193) | def sample_and_forward_field(self, ray_bundle: RayBundle):
method get_loss_dict (line 217) | def get_loss_dict(self, outputs, batch, metrics_dict=None):
method get_image_metrics_and_images (line 226) | def get_image_metrics_and_images(
method _compute_interlevel_loss (line 241) | def _compute_interlevel_loss(self, outputs: Dict[str, torch.Tensor], l...
method _get_interlevel_loss_inputs (line 246) | def _get_interlevel_loss_inputs(self, outputs):
method _compute_proposal_eikonal_loss (line 260) | def _compute_proposal_eikonal_loss(
FILE: AutoReconForDens3R/nerfstudio/models/neus_facto_dff.py
class NeuSFactoDFFModelConfig (line 37) | class NeuSFactoDFFModelConfig(NeuSFactoModelConfig):
class NeuSFactoDFFModel (line 74) | class NeuSFactoDFFModel(NeuSFactoModel):
method populate_modules (line 83) | def populate_modules(self):
method get_param_groups (line 106) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method sample_and_forward_field (line 117) | def sample_and_forward_field(self, ray_bundle: RayBundle):
method _sample_and_forward_field_ray_tracing (line 152) | def _sample_and_forward_field_ray_tracing(
method get_outputs (line 206) | def get_outputs(self, ray_bundle: RayBundle) -> Dict:
method get_loss_dict (line 246) | def get_loss_dict(self, outputs, batch, metrics_dict=None):
method get_image_metrics_and_images (line 257) | def get_image_metrics_and_images(
method query_seg_field (line 269) | def query_seg_field(self, inputs: TensorType["bs": ..., 3]) -> TensorT...
method seg_aware_sdf (line 275) | def seg_aware_sdf(self, inputs: TensorType["bs": ..., 3]) -> TensorTyp...
method _rectify_sdf (line 284) | def _rectify_sdf(self, sdf: TensorType["bs": ..., 1], seg: TensorType[...
method _get_pca_feature_rendering (line 293) | def _get_pca_feature_rendering(
method _get_fg_seg_rendering (line 321) | def _get_fg_seg_rendering(
method _get_pseudo_gt_seg_mask (line 344) | def _get_pseudo_gt_seg_mask(self,
method _get_ray_tracing_mask (line 364) | def _get_ray_tracing_mask(
method _get_ray_tracing_depth (line 389) | def _get_ray_tracing_depth(
method get_outputs_for_mesh_culling (line 405) | def get_outputs_for_mesh_culling(
method render_fg_seg_for_mesh_culling (line 438) | def render_fg_seg_for_mesh_culling(
FILE: AutoReconForDens3R/nerfstudio/models/neus_facto_reg.py
class NeuSFactoRegModelConfig (line 29) | class NeuSFactoRegModelConfig(NeuSFactoModelConfig):
class NeuSFactoRegModel (line 65) | class NeuSFactoRegModel(NeuSFactoModel):
method populate_modules (line 70) | def populate_modules(self):
method get_param_groups (line 117) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method sample_and_forward_field (line 122) | def sample_and_forward_field(self, ray_bundle: RayBundle):
method fg_fields_query_and_render (line 172) | def fg_fields_query_and_render(self, ray_bundle: RayBundle) -> Dict[st...
method get_loss_dict (line 230) | def get_loss_dict(self, outputs, batch, metrics_dict=None, step=None):
method get_image_metrics_and_images (line 247) | def get_image_metrics_and_images(
method _compute_ptcd_reg_plane_field (line 264) | def _compute_ptcd_reg_plane_field(self, outputs: Dict[str, torch.Tenso...
method get_metrics_dict (line 279) | def get_metrics_dict(self, outputs, batch) -> Dict:
FILE: AutoReconForDens3R/nerfstudio/models/semantic_nerfw.py
class SemanticNerfWModelConfig (line 58) | class SemanticNerfWModelConfig(NerfactoModelConfig):
class SemanticNerfWModel (line 71) | class SemanticNerfWModel(Model):
method __init__ (line 80) | def __init__(self, config: SemanticNerfWModelConfig, metadata: Dict, *...
method populate_modules (line 85) | def populate_modules(self):
method get_param_groups (line 148) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method get_training_callbacks (line 154) | def get_training_callbacks(
method get_outputs (line 178) | def get_outputs(self, ray_bundle: RayBundle):
method get_metrics_dict (line 228) | def get_metrics_dict(self, outputs, batch):
method get_loss_dict (line 236) | def get_loss_dict(self, outputs, batch, metrics_dict=None):
method get_image_metrics_and_images (line 256) | def get_image_metrics_and_images(
FILE: AutoReconForDens3R/nerfstudio/models/tensorf.py
class TensoRFModelConfig (line 54) | class TensoRFModelConfig(ModelConfig):
class TensoRFModel (line 77) | class TensoRFModel(Model):
method __init__ (line 84) | def __init__(
method get_training_callbacks (line 109) | def get_training_callbacks(
method populate_modules (line 146) | def populate_modules(self):
method get_param_groups (line 196) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method get_outputs (line 210) | def get_outputs(self, ray_bundle: RayBundle):
method get_loss_dict (line 242) | def get_loss_dict(self, outputs, batch, metrics_dict=None) -> Dict[str...
method get_image_metrics_and_images (line 253) | def get_image_metrics_and_images(
FILE: AutoReconForDens3R/nerfstudio/models/unisurf.py
class UniSurfModelConfig (line 38) | class UniSurfModelConfig(SurfaceModelConfig):
class UniSurfModel (line 56) | class UniSurfModel(SurfaceModel):
method populate_modules (line 65) | def populate_modules(self):
method get_training_callbacks (line 79) | def get_training_callbacks(
method sample_and_forward_field (line 92) | def sample_and_forward_field(self, ray_bundle: RayBundle) -> Dict:
method get_metrics_dict (line 111) | def get_metrics_dict(self, outputs, batch) -> Dict:
method get_loss_dict (line 119) | def get_loss_dict(self, outputs, batch, metrics_dict=None) -> Dict:
FILE: AutoReconForDens3R/nerfstudio/models/vanilla_nerf.py
class VanillaModelConfig (line 48) | class VanillaModelConfig(ModelConfig):
class NeRFModel (line 63) | class NeRFModel(Model):
method __init__ (line 70) | def __init__(
method populate_modules (line 84) | def populate_modules(self):
method get_param_groups (line 128) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
method get_outputs (line 137) | def get_outputs(self, ray_bundle: RayBundle):
method get_loss_dict (line 184) | def get_loss_dict(self, outputs, batch, metrics_dict=None) -> Dict[str...
method get_image_metrics_and_images (line 196) | def get_image_metrics_and_images(
FILE: AutoReconForDens3R/nerfstudio/models/volsdf.py
class VolSDFModelConfig (line 31) | class VolSDFModelConfig(SurfaceModelConfig):
class VolSDFModel (line 43) | class VolSDFModel(SurfaceModel):
method populate_modules (line 52) | def populate_modules(self):
method sample_and_forward_field (line 62) | def sample_and_forward_field(self, ray_bundle: RayBundle) -> Dict:
method get_metrics_dict (line 79) | def get_metrics_dict(self, outputs, batch) -> Dict:
FILE: AutoReconForDens3R/nerfstudio/pipelines/base_pipeline.py
function module_wrapper (line 55) | def module_wrapper(ddp_or_model: Union[DDP, Model]) -> Model:
class Pipeline (line 64) | class Pipeline(nn.Module):
method model (line 103) | def model(self):
method device (line 108) | def device(self):
method get_train_loss_dict (line 113) | def get_train_loss_dict(self, step: int):
method get_eval_loss_dict (line 132) | def get_eval_loss_dict(self, step: int):
method get_eval_image_metrics_and_images (line 152) | def get_eval_image_metrics_and_images(self, step: int):
method get_average_eval_image_metrics (line 162) | def get_average_eval_image_metrics(self, step: Optional[int] = None):
method load_pipeline (line 165) | def load_pipeline(self, loaded_state: Dict[str, Any]) -> None:
method get_training_callbacks (line 172) | def get_training_callbacks(
method get_param_groups (line 177) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
class VanillaPipelineConfig (line 186) | class VanillaPipelineConfig(cfg.InstantiateConfig):
class VanillaPipeline (line 197) | class VanillaPipeline(Pipeline):
method __init__ (line 214) | def __init__(
method device (line 250) | def device(self):
method get_train_loss_dict (line 255) | def get_train_loss_dict(self, step: int):
method forward (line 281) | def forward(self):
method get_eval_loss_dict (line 289) | def get_eval_loss_dict(self, step: int):
method get_eval_image_metrics_and_images (line 305) | def get_eval_image_metrics_and_images(self, step: int):
method get_average_eval_image_metrics (line 324) | def get_average_eval_image_metrics(self, step: Optional[int] = None):
method load_pipeline (line 371) | def load_pipeline(self, loaded_state: Dict[str, Any], strict=True) -> ...
method get_training_callbacks (line 392) | def get_training_callbacks(
method get_param_groups (line 401) | def get_param_groups(self) -> Dict[str, List[Parameter]]:
class FlexibleInputPipelineConfig (line 414) | class FlexibleInputPipelineConfig(VanillaPipelineConfig):
class FlexibleInputPipeline (line 425) | class FlexibleInputPipeline(VanillaPipeline):
method get_train_loss_dict (line 427) | def get_train_loss_dict(self, step: int):
FILE: AutoReconForDens3R/nerfstudio/pipelines/dynamic_batch.py
class DynamicBatchPipelineConfig (line 30) | class DynamicBatchPipelineConfig(VanillaPipelineConfig):
class DynamicBatchPipeline (line 40) | class DynamicBatchPipeline(VanillaPipeline):
method __init__ (line 49) | def __init__(
method _update_pixel_samplers (line 65) | def _update_pixel_samplers(self):
method _update_dynamic_num_rays_per_batch (line 70) | def _update_dynamic_num_rays_per_batch(self, num_samples_per_batch: int):
method get_train_loss_dict (line 77) | def get_train_loss_dict(self, step: int):
method get_eval_loss_dict (line 95) | def get_eval_loss_dict(self, step: int):
FILE: AutoReconForDens3R/nerfstudio/process_data/colmap_utils.py
class ColmapCameraModel (line 61) | class ColmapCameraModel:
class Camera (line 73) | class Camera:
class Image (line 89) | class Image:
class Point3D (line 109) | class Point3D:
function get_colmap_version (line 143) | def get_colmap_version(colmap_cmd: str, default_version=3.8) -> float:
function read_next_bytes (line 162) | def read_next_bytes(fid: BufferedReader, num_bytes: int, format_char_seq...
function read_cameras_text (line 176) | def read_cameras_text(path: Path) -> Dict[int, Camera]:
function read_cameras_binary (line 202) | def read_cameras_binary(path_to_model_file: Path) -> Dict[int, Camera]:
function read_images_text (line 229) | def read_images_text(path: Path) -> Dict[int, Image]:
function read_images_binary (line 266) | def read_images_binary(path_to_model_file: Path) -> Dict[int, Image]:
function read_points3d_text (line 304) | def read_points3d_text(path) -> Dict[int, Point3D]:
function read_points3d_binary (line 333) | def read_points3d_binary(path_to_model_file: Path) -> Dict[int, Point3D]:
function detect_model_format (line 360) | def detect_model_format(path: Path, ext: str) -> bool:
function read_model (line 381) | def read_model(path: Path, ext: Optional[str] = None) -> Tuple[Dict[int,...
function qvec2rotmat (line 410) | def qvec2rotmat(qvec) -> np.ndarray:
function rotmat2qvec (line 439) | def rotmat2qvec(R):
function get_vocab_tree (line 466) | def get_vocab_tree() -> Path:
function run_colmap (line 491) | def run_colmap(
function colmap_to_json (line 578) | def colmap_to_json(cameras_path: Path, images_path: Path, output_dir: Pa...
function get_matching_summary (line 654) | def get_matching_summary(num_intial_frames: int, num_matched_frames: int...
FILE: AutoReconForDens3R/nerfstudio/process_data/hloc_utils.py
function run_hloc (line 47) | def run_hloc(
FILE: AutoReconForDens3R/nerfstudio/process_data/insta360_utils.py
function get_insta360_filenames (line 30) | def get_insta360_filenames(data: Path) -> Tuple[Path, Path]:
function convert_insta360_to_images (line 54) | def convert_insta360_to_images(
function convert_insta360_single_file_to_images (line 125) | def convert_insta360_single_file_to_images(
FILE: AutoReconForDens3R/nerfstudio/process_data/metashape_utils.py
function _find_distortion_param (line 30) | def _find_distortion_param(calib_xml: ET.Element, param_name: str):
function metashape_to_json (line 37) | def metashape_to_json( # pylint: disable=too-many-statements
FILE: AutoReconForDens3R/nerfstudio/process_data/polycam_utils.py
function polycam_to_json (line 30) | def polycam_to_json(
FILE: AutoReconForDens3R/nerfstudio/process_data/process_data_utils.py
class CameraModel (line 32) | class CameraModel(Enum):
function get_num_frames_in_video (line 45) | def get_num_frames_in_video(video: Path) -> int:
function convert_video_to_images (line 62) | def convert_video_to_images(
function copy_images_list (line 111) | def copy_images_list(
function copy_images (line 156) | def copy_images(data: Path, image_dir: Path, verbose) -> int:
function downscale_images (line 175) | def downscale_images(image_dir: Path, num_downscales: int, verbose: bool...
function find_tool_feature_matcher_combination (line 215) | def find_tool_feature_matcher_combination(
FILE: AutoReconForDens3R/nerfstudio/process_data/record3d_utils.py
function record3d_to_json (line 31) | def record3d_to_json(images_paths: List[Path], metadata_path: Path, outp...
FILE: AutoReconForDens3R/nerfstudio/utils/bilateral_solver.py
function rgb2yuv (line 16) | def rgb2yuv(im):
function yuv2rgb (line 20) | def yuv2rgb(im):
function get_valid_idx (line 24) | def get_valid_idx(valid, candidates):
class BilateralGrid (line 35) | class BilateralGrid(object):
method __init__ (line 36) | def __init__(self, im, sigma_spatial=32, sigma_luma=8, sigma_chroma=8):
method _compute_factorization (line 53) | def _compute_factorization(self, coords_flat):
method _hash_coords (line 79) | def _hash_coords(self, coord):
method splat (line 83) | def splat(self, x):
method slice (line 86) | def slice(self, y):
method blur (line 89) | def blur(self, x):
method filter (line 97) | def filter(self, x):
function bistochastize (line 103) | def bistochastize(grid, maxiter=10):
class BilateralSolver (line 117) | class BilateralSolver(object):
method __init__ (line 118) | def __init__(self, grid, params):
method solve (line 123) | def solve(self, x, w):
function bilateral_refine (line 154) | def bilateral_refine(image, mask, sigma_spatial=24, sigma_luma=4, sigma_...
FILE: AutoReconForDens3R/nerfstudio/utils/colormaps.py
function apply_colormap (line 26) | def apply_colormap(image: TensorType["bs":..., 1], cmap="viridis") -> Te...
function apply_depth_colormap (line 48) | def apply_depth_colormap(
function apply_boolean_colormap (line 83) | def apply_boolean_colormap(
FILE: AutoReconForDens3R/nerfstudio/utils/colors.py
function get_color (line 36) | def get_color(color: Union[str, list]) -> TensorType[3]:
FILE: AutoReconForDens3R/nerfstudio/utils/comms.py
function is_dist_avail_and_initialized (line 21) | def is_dist_avail_and_initialized() -> bool:
function get_world_size (line 26) | def get_world_size() -> int:
function get_rank (line 33) | def get_rank() -> int:
function get_local_rank (line 40) | def get_local_rank() -> int:
function get_local_size (line 50) | def get_local_size() -> int:
function is_main_process (line 60) | def is_main_process() -> bool:
function synchronize (line 65) | def synchronize():
FILE: AutoReconForDens3R/nerfstudio/utils/decorators.py
function decorate_all (line 23) | def decorate_all(decorators: List[Callable]) -> Callable:
function check_profiler_enabled (line 40) | def check_profiler_enabled(func: Callable) -> Callable:
function check_viewer_enabled (line 52) | def check_viewer_enabled(func: Callable) -> Callable:
function check_eval_enabled (line 64) | def check_eval_enabled(func: Callable) -> Callable:
function check_main_thread (line 76) | def check_main_thread(func: Callable) -> Callable:
FILE: AutoReconForDens3R/nerfstudio/utils/eval_utils.py
function eval_load_checkpoint (line 36) | def eval_load_checkpoint(config: cfg.TrainerConfig, pipeline: Pipeline) ...
function eval_setup (line 71) | def eval_setup(
FILE: AutoReconForDens3R/nerfstudio/utils/func_utils.py
function get_first_element (line 5) | def get_first_element(elems: Sequence):
FILE: AutoReconForDens3R/nerfstudio/utils/images.py
class BasicImages (line 26) | class BasicImages:
method __init__ (line 39) | def __init__(self, images: List):
method to (line 46) | def to(self, device):
FILE: AutoReconForDens3R/nerfstudio/utils/install_checks.py
function check_ffmpeg_installed (line 25) | def check_ffmpeg_installed():
function check_colmap_installed (line 35) | def check_colmap_installed():
FILE: AutoReconForDens3R/nerfstudio/utils/io.py
function load_from_json (line 23) | def load_from_json(filename: Path):
function write_to_json (line 34) | def write_to_json(filename: Path, content: dict):
FILE: AutoReconForDens3R/nerfstudio/utils/marching_cubes.py
function get_surface_sliding (line 17) | def get_surface_sliding(
function get_surface_occupancy (line 177) | def get_surface_occupancy(
FILE: AutoReconForDens3R/nerfstudio/utils/mask_utils.py
function alpha_composite (line 8) | def alpha_composite(rgb0: torch.Tensor, rgb1: torch.Tensor,
function overlay_mask_on_image (line 17) | def overlay_mask_on_image(mask_rgb: TensorType["h", "w", 3],
function compute_mask_indices (line 25) | def compute_mask_indices(collated_batch: Dict[str, Any]) -> Dict[str, Any]:
FILE: AutoReconForDens3R/nerfstudio/utils/math.py
function components_from_spherical_harmonics (line 23) | def components_from_spherical_harmonics(levels: int, directions: TensorT...
class Gaussians (line 88) | class Gaussians:
function compute_3d_gaussian (line 100) | def compute_3d_gaussian(
function cylinder_to_gaussian (line 128) | def cylinder_to_gaussian(
function conical_frustum_to_gaussian (line 153) | def conical_frustum_to_gaussian(
function expected_sin (line 182) | def expected_sin(x_means: torch.Tensor, x_vars: torch.Tensor) -> torch.T...
FILE: AutoReconForDens3R/nerfstudio/utils/misc.py
function get_dict_to_torch (line 25) | def get_dict_to_torch(stuff: Any, device: Union[torch.device, str] = "cp...
function get_dict_to_cpu (line 45) | def get_dict_to_cpu(stuff: Any):
function get_masked_dict (line 60) | def get_masked_dict(d, mask):
class IterableWrapper (line 74) | class IterableWrapper: # pylint: disable=too-few-public-methods
method __init__ (line 99) | def __init__(self, new_iter: Callable, new_next: Callable, length: int...
method __next__ (line 104) | def __next__(self):
method __iter__ (line 110) | def __iter__(self):
function scale_dict (line 116) | def scale_dict(dictionary: Dict[Any, Any], coefficients: Dict[str, float...
function step_check (line 132) | def step_check(step, step_size, run_at_zero=False) -> bool:
function update_avg (line 139) | def update_avg(prev_avg: float, new_val: float, step: int) -> float:
FILE: AutoReconForDens3R/nerfstudio/utils/plotly_utils.py
function color_str (line 38) | def color_str(color):
function get_line_segments_from_lines (line 51) | def get_line_segments_from_lines(
function vis_dataset (line 102) | def vis_dataset(camera_origins: TensorType["num_cameras", 3], ray_bundle...
function get_random_color (line 150) | def get_random_color(colormap: Optional[List[str]] = None, idx: Optional...
function get_sphere (line 167) | def get_sphere(
function get_cube (line 207) | def get_cube(
function get_gaussian_ellipsiod (line 247) | def get_gaussian_ellipsiod(
function get_gaussian_ellipsoids_list (line 306) | def get_gaussian_ellipsoids_list(
function get_frustum_mesh (line 349) | def get_frustum_mesh(
function get_frustums_mesh_list (line 400) | def get_frustums_mesh_list(
function get_frustum_points (line 424) | def get_frustum_points(
function get_ray_bundle_lines (line 456) | def get_ray_bundle_lines(
function vis_camera_rays (line 487) | def vis_camera_rays(cameras: Cameras) -> go.Figure: # type: ignore
function get_camera_frustums (line 541) | def get_camera_frustums(cameras: Cameras):
FILE: AutoReconForDens3R/nerfstudio/utils/pointclouds.py
class BasicPointClouds (line 19) | class BasicPointClouds(nn.Module):
method __init__ (line 33) | def __init__(
method build_nn_search_index (line 70) | def build_nn_search_index(self, separate_bg_plane: bool = False):
method sample_fg_points (line 103) | def sample_fg_points(self, n_pts: int, replace: bool = True):
method sample_plane_points (line 116) | def sample_plane_points(self, n_pts: int, replace: bool = True, rand_s...
method device (line 135) | def device(self):
method _register_all_tensors_as_buffers (line 138) | def _register_all_tensors_as_buffers(self):
FILE: AutoReconForDens3R/nerfstudio/utils/poses.py
function to4x4 (line 23) | def to4x4(pose: TensorType[..., 3, 4]) -> TensorType[..., 4, 4]:
function inverse (line 37) | def inverse(pose: TensorType[..., 3, 4]) -> TensorType[..., 3, 4]:
function multiply (line 53) | def multiply(pose_a: TensorType[..., 3, 4], pose_b: TensorType[..., 3, 4...
function normalize (line 70) | def normalize(poses: TensorType[..., 3, 4]) -> TensorType[..., 3, 4]:
FILE: AutoReconForDens3R/nerfstudio/utils/printing.py
function print_tcnn_speed_warning (line 24) | def print_tcnn_speed_warning(method_name: str):
function human_format (line 36) | def human_format(num):
FILE: AutoReconForDens3R/nerfstudio/utils/profiler.py
function time_function (line 39) | def time_function(func: Callable) -> Callable:
function time_function_cuda (line 57) | def time_function_cuda(func: Callable) -> Callable:
function flush_profiler (line 75) | def flush_profiler(config: cfg.LoggingConfig):
function setup_profiler (line 81) | def setup_profiler(config: cfg.LoggingConfig):
class Profiler (line 88) | class Profiler:
method __init__ (line 91) | def __init__(self, config: cfg.LoggingConfig):
method update_time (line 95) | def update_time(self, func_name: str, start_time: float, end_time: flo...
method print_profile (line 109) | def print_profile(self):
FILE: AutoReconForDens3R/nerfstudio/utils/rich_utils.py
class ItersPerSecColumn (line 34) | class ItersPerSecColumn(ProgressColumn):
method __init__ (line 37) | def __init__(self, suffix="it/s") -> None:
method render (line 41) | def render(self, task: "Task") -> Text:
function status (line 49) | def status(msg: str, spinner: str = "bouncingBall", verbose: bool = False):
function get_progress (line 62) | def get_progress(description: str, suffix: Optional[str] = None):
FILE: AutoReconForDens3R/nerfstudio/utils/scheduler.py
function cosine_annealing (line 7) | def cosine_annealing(cur_step, max_step=None, min_val=0.0, max_val=1.0):
function _compute_exp_anneal_gamma (line 14) | def _compute_exp_anneal_gamma(max_step, min_val, max_val):
function exp_annealing (line 18) | def exp_annealing(cur_step, max_step=None, min_val=0.0, max_val=1.0):
function constant (line 23) | def constant(cur_step, max_step=None, min_val=0.0, max_val=1.0):
FILE: AutoReconForDens3R/nerfstudio/utils/scripts.py
function run_command (line 25) | def run_command(cmd: str, verbose=False) -> Optional[str]:
FILE: AutoReconForDens3R/nerfstudio/utils/tensor_dataclass.py
class TensorDataclass (line 27) | class TensorDataclass:
method __post_init__ (line 67) | def __post_init__(self) -> None:
method _get_dict_batch_shapes (line 97) | def _get_dict_batch_shapes(self, dict_: Dict) -> List:
method _broadcast_dict_fields (line 120) | def _broadcast_dict_fields(self, dict_: Dict, batch_shape) -> Dict:
method __getitem__ (line 149) | def __getitem__(self: TensorDataclassT, indices) -> TensorDataclassT:
method __setitem__ (line 164) | def __setitem__(self, indices, value) -> NoReturn:
method __len__ (line 167) | def __len__(self) -> int:
method __bool__ (line 172) | def __bool__(self) -> bool:
method shape (line 181) | def shape(self) -> Tuple[int, ...]:
method size (line 186) | def size(self) -> int:
method ndim (line 193) | def ndim(self) -> int:
method reshape (line 197) | def reshape(self: TensorDataclassT, shape: Tuple[int, ...]) -> TensorD...
method flatten (line 219) | def flatten(self: TensorDataclassT) -> TensorDataclassT:
method broadcast_to (line 227) | def broadcast_to(self: TensorDataclassT, shape: Union[torch.Size, Tupl...
method to (line 248) | def to(self: TensorDataclassT, device) -> TensorDataclassT:
method _apply_fn_to_fields (line 259) | def _apply_fn_to_fields(
method _apply_fn_to_dict (line 293) | def _apply_fn_to_dict(
FILE: AutoReconForDens3R/nerfstudio/utils/vis_utils.py
function interpolate_trajectory (line 14) | def interpolate_trajectory(cameras: Cameras, num_views: int = 300):
FILE: AutoReconForDens3R/nerfstudio/utils/writer.py
class EventName (line 43) | class EventName(enum.Enum):
class EventType (line 57) | class EventType(enum.Enum):
function put_image (line 67) | def put_image(name, image: TensorType["H", "W", "C"], step: int):
function put_scalar (line 81) | def put_scalar(name: str, scalar: Any, step: int):
function put_dict (line 96) | def put_dict(name: str, scalar_dict: Dict[str, Any], step: int):
function put_config (line 108) | def put_config(name: str, config_dict: Dict[str, Any], step: int):
function put_time (line 120) | def put_time(name: str, duration: float, step: int, avg_over_steps: bool...
function write_out_storage (line 156) | def write_out_storage():
function setup_local_writer (line 169) | def setup_local_writer(config: cfg.LoggingConfig, max_iter: int, banner_...
function setup_event_writer (line 191) | def setup_event_writer(config: cfg.Config, log_dir: Path) -> None:
class Writer (line 215) | class Writer:
method write_image (line 219) | def write_image(self, name: str, image: TensorType["H", "W", "C"], ste...
method write_scalar (line 230) | def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], ...
method write_scalar_dict (line 241) | def write_scalar_dict(self, name: str, scalar_dict: Dict[str, Any], st...
class TimeWriter (line 252) | class TimeWriter:
method __init__ (line 255) | def __init__(self, writer, name, step=None, write=True):
method __enter__ (line 264) | def __enter__(self):
method __exit__ (line 269) | def __exit__(self, *args):
class WandbWriter (line 284) | class WandbWriter(Writer):
method __init__ (line 287) | def __init__(self, log_dir: Path, experiment_name: str):
method write_image (line 290) | def write_image(self, name: str, image: TensorType["H", "W", "C"], ste...
method write_scalar (line 294) | def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], ...
method write_config (line 297) | def write_config(self, name: str, config_dict: Dict[str, Any], step: i...
class TensorboardWriter (line 309) | class TensorboardWriter(Writer):
method __init__ (line 312) | def __init__(self, log_dir: Path):
method write_image (line 315) | def write_image(self, name: str, image: TensorType["H", "W", "C"], ste...
method write_scalar (line 319) | def write_scalar(self, name: str, scalar: Union[float, torch.Tensor], ...
method write_config (line 322) | def write_config(self, name: str, config_dict: Dict[str, Any], step: i...
function _cursorup (line 331) | def _cursorup(x: int):
function _format_time (line 340) | def _format_time(seconds):
class LocalWriter (line 361) | class LocalWriter:
method __init__ (line 370) | def __init__(self, config: cfg.LocalWriterConfig, banner_messages: Opt...
method write_stats_log (line 381) | def write_stats_log(self, step: int) -> None:
method write_config (line 399) | def write_config(self, name: str, config_dict: Dict[str, Any], step: i...
method _consolidate_events (line 407) | def _consolidate_events(self):
method _update_header (line 418) | def _update_header(self, latest_map, new_key):
method _print_stats (line 438) | def _print_stats(self, latest_map, padding=" "):
FILE: AutoReconForDens3R/nerfstudio/viewer/app/public/electron.js
function createWindow (line 6) | function createWindow() {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/run_deploy.py
function run_cmd (line 30) | def run_cmd(cmd: str):
function main (line 37) | def main(
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/App.jsx
function App (line 13) | function App() {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/SceneNode.js
function dispose (line 5) | function dispose(object) {
class SceneNode (line 29) | class SceneNode {
method constructor (line 30) | constructor(object, scene_state) {
method get_scene_state (line 40) | get_scene_state() {
method add_child (line 44) | add_child(object) {
method create_child (line 50) | create_child(name) {
method find (line 57) | find(path) {
method find_object (line 69) | find_object(path) {
method find_no_create (line 73) | find_no_create(path) {
method find_object_no_create (line 85) | find_object_no_create(path) {
method set_property (line 89) | set_property(property, value) {
method set_transform (line 102) | set_transform(matrix) {
method set_object_from_path (line 112) | set_object_from_path(path, object) {
method set_object (line 116) | set_object(object) {
method dispose_recursive (line 124) | dispose_recursive() {
method delete (line 131) | delete(path) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/Banner/Banner.jsx
function getParam (line 10) | function getParam(param_name) {
function Banner (line 21) | function Banner() {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/ConfigPanel/ConfigPanel.jsx
function RenderControls (line 9) | function RenderControls() {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/LandingModal/LandingModal.jsx
function TabPanel (line 28) | function TabPanel(props: TabPanelProps) {
function a11yProps (line 47) | function a11yProps(index: number) {
function LandingModel (line 54) | function LandingModel(props: LandingModalProps) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/LogPanel/LogPanel.jsx
function LogPanel (line 8) | function LogPanel() {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/RenderModal/RenderModal.jsx
function RenderModal (line 13) | function RenderModal(props: RenderModalProps) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/Scene/Scene.jsx
constant SCENE_BOX_NAME (line 20) | const SCENE_BOX_NAME = 'Scene Box';
constant CAMERAS_NAME (line 21) | const CAMERAS_NAME = 'Training Cameras';
function get_scene_tree (line 23) | function get_scene_tree() {
function SceneTreeWebSocketListener (line 388) | function SceneTreeWebSocketListener() {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/Scene/drawing.js
function getCameraWireframe (line 48) | function getCameraWireframe(
function drawCameraImagePlane (line 103) | function drawCameraImagePlane(width, height, imageString, name) {
function transpose (line 116) | function transpose(matrix) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/CameraPanel/CameraHelper.js
function setPoint (line 9) | function setPoint(point, pointMap, geometry, camera, x, y, z) {
class CameraHelper (line 27) | class CameraHelper extends THREE.Mesh {
method constructor (line 28) | constructor(camera, color = 0x000000) {
method update (line 99) | update() {
method set_visibility (line 138) | set_visibility(visible) {
method dispose (line 143) | dispose() {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/CameraPanel/CameraPanel.jsx
constant FOV_LABELS (line 52) | const FOV_LABELS = {
function set_camera_position (line 57) | function set_camera_position(camera, matrix) {
function FovSelector (line 63) | function FovSelector(props) {
function CameraList (line 198) | function CameraList(props) {
function CameraPanel (line 422) | function CameraPanel(props) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/CameraPanel/curve.js
function get_catmull_rom_curve (line 5) | function get_catmull_rom_curve(list_of_3d_vectors, is_cycle, smoothness_...
function get_curve_object_from_cameras (line 17) | function get_curve_object_from_cameras(
function get_transform_matrix (line 69) | function get_transform_matrix(position, lookat, up) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/ExportPanel/ExportPanel.jsx
function TabPanel (line 20) | function TabPanel(props: TabPanelProps) {
function a11yProps (line 40) | function a11yProps(index: number) {
constant CLIPPING_BOX_NAME (line 47) | const CLIPPING_BOX_NAME = 'Clipping Box';
function ExportPanel (line 49) | function ExportPanel(props) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/ExportPanel/MeshSubPanel.jsx
function get_normal_outputs (line 9) | function get_normal_outputs(output_options) {
function MeshSubPanel (line 27) | function MeshSubPanel(props) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/ExportPanel/PointcloudSubPanel.jsx
function PointcloudSubPanel (line 9) | function PointcloudSubPanel(props) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/ScenePanel/ScenePanel.jsx
function MenuItems (line 47) | function MenuItems(props: ListItemProps) {
function ClickableList (line 191) | function ClickableList(props: ClickableListProps) {
function ScenePanel (line 207) | function ScenePanel(props) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/SidePanel.jsx
function TabPanel (line 47) | function TabPanel(props: TabPanelProps) {
function a11yProps (line 66) | function a11yProps(index: number) {
function BasicTabs (line 77) | function BasicTabs(props: BasicTabsProps) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/StatusPanel/StatusPanel.jsx
function StatusPanel (line 18) | function StatusPanel(props: StatusPanelProps) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/ViewerWindow/ViewerWindow.jsx
function CameraToggle (line 17) | function CameraToggle() {
function TransformIcons (line 47) | function TransformIcons(props) {
function ViewerWindow (line 93) | function ViewerWindow(props) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/ViewportControlsModal/ViewportControlsModal.jsx
function ControlsModal (line 7) | function ControlsModal() {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/WebRtcWindow/WebRtcWindow.jsx
function WebRtcWindow (line 11) | function WebRtcWindow() {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/WebSocket/WebSocket.jsx
function WebSocketContextFunction (line 13) | function WebSocketContextFunction({ children }) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/modules/WebSocketUrlField.jsx
function WebSocketUrlField (line 6) | function WebSocketUrlField() {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/reducer.js
function setData (line 54) | function setData(newState, state, path, data) {
function rootReducer (line 68) | function rootReducer(state = initialState, action) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/subscriber.js
function subscribe_to_changes (line 4) | function subscribe_to_changes(selector_fn, fn) {
FILE: AutoReconForDens3R/nerfstudio/viewer/app/src/utils.js
function split_path (line 1) | function split_path(path_str) {
FILE: AutoReconForDens3R/nerfstudio/viewer/server/path.py
class Path (line 24) | class Path:
method __init__ (line 33) | def __init__(self, entries: Tuple = tuple()):
method append (line 36) | def append(self, other: str) -> "Path":
method lower (line 50) | def lower(self):
method __hash__ (line 54) | def __hash__(self):
method __eq__ (line 57) | def __eq__(self, other):
FILE: AutoReconForDens3R/nerfstudio/viewer/server/server.py
class WebSocketHandler (line 35) | class WebSocketHandler(tornado.websocket.WebSocketHandler): # pylint: d...
method __init__ (line 38) | def __init__(self, *args, **kwargs):
method check_origin (line 42) | def check_origin(self, origin):
method open (line 46) | def open(self, *args: str, **kwargs: str):
method on_message (line 52) | async def on_message(self, message: bytearray): # pylint: disable=inv...
method on_close (line 88) | def on_close(self):
class ZMQWebSocketBridge (line 93) | class ZMQWebSocketBridge:
method __init__ (line 103) | def __init__(self, zmq_port: int, websocket_port: int, ip_address: str):
method __str__ (line 122) | def __str__(self) -> str:
method make_app (line 126) | def make_app(self):
method handle_zmq (line 130) | def handle_zmq(self, frames: List[bytes]):
method forward_to_websockets (line 157) | def forward_to_websockets(
method setup_zmq (line 172) | def setup_zmq(self, url: str):
method send_scene (line 184) | def send_scene(self, websocket: WebSocketHandler):
method run (line 196) | def run(self):
function run_viewer_bridge_server (line 201) | def run_viewer_bridge_server(
function entrypoint (line 228) | def entrypoint():
FILE: AutoReconForDens3R/nerfstudio/viewer/server/state/node.py
class Node (line 23) | class Node(defaultdict):
method __init__ (line 28) | def __init__(self, *args, **kwargs):
function get_tree (line 32) | def get_tree(node_class: Callable) -> Callable:
function find_node (line 44) | def find_node(tree, path):
function set_node_value (line 51) | def set_node_value(tree, path, value):
function walk (line 58) | def walk(path, tree):
FILE: AutoReconForDens3R/nerfstudio/viewer/server/state/state_node.py
class StateNode (line 20) | class StateNode(Node):
method __init__ (line 25) | def __init__(self, *args, **kwargs):
FILE: AutoReconForDens3R/nerfstudio/viewer/server/subprocess.py
function run_viewer_bridge_server_as_subprocess (line 36) | def run_viewer_bridge_server_as_subprocess(
FILE: AutoReconForDens3R/nerfstudio/viewer/server/utils.py
function get_chunks (line 28) | def get_chunks(
function three_js_perspective_camera_focal_length (line 50) | def three_js_perspective_camera_focal_length(fov: float, image_height: i...
function get_intrinsics_matrix_and_camera_to_world_h (line 65) | def get_intrinsics_matrix_and_camera_to_world_h(
function find_available_port (line 98) | def find_available_port(func: Callable, default_port: int, max_attempts:...
function force_codec (line 120) | def force_codec(pc: RTCPeerConnection, sender: RTCRtpSender, forced_code...
FILE: AutoReconForDens3R/nerfstudio/viewer/server/video_stream.py
class SingleFrameStreamTrack (line 22) | class SingleFrameStreamTrack(VideoStreamTrack):
method __init__ (line 25) | def __init__(self):
method put_frame (line 31) | def put_frame(self, frame: np.ndarray) -> None:
method recv (line 39) | async def recv(self):
FILE: AutoReconForDens3R/nerfstudio/viewer/server/viewer_utils.py
function get_viewer_version (line 64) | def get_viewer_version() -> str:
function setup_viewer (line 72) | def setup_viewer(config: cfg.ViewerConfig, log_filename: Path):
class OutputTypes (line 83) | class OutputTypes(str, enum.Enum):
class ColormapTypes (line 93) | class ColormapTypes(str, enum.Enum):
class IOChangeException (line 104) | class IOChangeException(Exception):
class SetTrace (line 108) | class SetTrace:
method __init__ (line 111) | def __init__(self, func):
method __enter__ (line 114) | def __enter__(self):
method __exit__ (line 118) | def __exit__(self, ext_type, exc_value, traceback):
class RenderThread (line 122) | class RenderThread(threading.Thread):
method __init__ (line 131) | def __init__(self, state: "ViewerState", graph: Model, camera_ray_bund...
method run (line 139) | def run(self):
method join (line 159) | def join(self, timeout=None):
class CheckThread (line 165) | class CheckThread(threading.Thread):
method __init__ (line 172) | def __init__(self, state):
method run (line 176) | def run(self):
class ViewerState (line 222) | class ViewerState:
method __init__ (line 229) | def __init__(self, config: cfg.ViewerConfig, log_filename: Path):
method _pick_drawn_image_idxs (line 283) | def _pick_drawn_image_idxs(self, total_num: int) -> list[int]:
method init_scene (line 299) | def init_scene(self, dataset: InputDataset, start_train=True) -> None:
method _check_camera_path_payload (line 341) | def _check_camera_path_payload(self, trainer, step: int):
method _check_webrtc_offer (line 354) | def _check_webrtc_offer(self):
method update_scene (line 374) | def update_scene(self, trainer, step: int, graph: Model, num_rays_per_...
method check_interrupt (line 438) | def check_interrupt(self, frame, event, arg): # pylint: disable=unuse...
method _get_camera_object (line 447) | def _get_camera_object(self):
method _apply_colormap (line 463) | def _apply_colormap(self, outputs: Dict[str, Any], colors: torch.Tenso...
method send_webrtc_answer (line 514) | async def send_webrtc_answer(self, data):
method set_image (line 565) | def set_image(self, image):
method _send_output_to_viewer (line 570) | def _send_output_to_viewer(self, outputs: Dict[str, Any], colors: torc...
method _update_viewer_stats (line 606) | def _update_viewer_stats(self, render_time: float, num_rays: int, imag...
method _calculate_image_res (line 638) | def _calculate_image_res(self, camera_object, is_training: bool) -> Op...
method _process_invalid_output (line 691) | def _process_invalid_output(self, output_type: str) -> str:
method _render_image_in_viewer (line 714) | def _render_image_in_viewer(self, camera_object, graph: Model, is_trai...
FILE: AutoReconForDens3R/nerfstudio/viewer/server/visualizer.py
class ViewerWindow (line 31) | class ViewerWindow:
method __init__ (line 41) | def __init__(self, zmq_port, ip_address="127.0.0.1"):
method send (line 48) | def send(self, command):
method send_ping (line 59) | def send_ping(self):
method timeout_ping (line 73) | def timeout_ping(self, timeout_in_sec: int = 15):
method assert_connected (line 94) | def assert_connected(self, timeout_in_sec: int = 15):
class Viewer (line 110) | class Viewer:
method __init__ (line 119) | def __init__(
method view_into (line 131) | def view_into(window: ViewerWindow, path: Path):
method __getitem__ (line 137) | def __getitem__(self, path):
method __repr__ (line 140) | def __repr__(self):
method write (line 143) | def write(self, data: Union[Dict, str, None] = None):
method read (line 148) | def read(self):
method delete (line 153) | def delete(self):
FILE: AutoReconForDens3R/scripts/blender/render_mesh_blender.py
function load_K_Rt_from_P (line 20) | def load_K_Rt_from_P(filename, P=None):
function load_all_poses (line 44) | def load_all_poses(cam_path, img_dir):
function parse_images_from_camera_dict (line 59) | def parse_images_from_camera_dict(camera_path, image_dir):
function load_object_aabb (line 73) | def load_object_aabb(object_anno_path, camera_anno_path) -> np.ndarray:
function set_background_color (line 85) | def set_background_color(color):
function parse_args (line 98) | def parse_args():
function compute_ground_poision (line 120) | def compute_ground_poision(obj_aabb):
function scale_aabb_to_unit_sphere (line 127) | def scale_aabb_to_unit_sphere(all_poses, obj_aabb):
function main (line 135) | def main():
FILE: AutoReconForDens3R/scripts/completions/install.py
function _check_tyro_cli (line 38) | def _check_tyro_cli(script_path: pathlib.Path) -> bool:
function _generate_completion (line 73) | def _generate_completion(
function _exclamation (line 128) | def _exclamation() -> str:
function _update_rc (line 132) | def _update_rc(
function main (line 194) | def main(mode: ConfigureMode = "install") -> None:
function entrypoint (line 281) | def entrypoint():
FILE: AutoReconForDens3R/scripts/datasets/extract_monocular_cues.py
function standardize_depth_map (line 122) | def standardize_depth_map(img, mask_valid=None, trunc_value=0.1):
function save_outputs (line 142) | def save_outputs(img_path, output_file_name):
FILE: AutoReconForDens3R/scripts/datasets/process_nerfstudio_to_sdfstudio.py
function main (line 15) | def main():
FILE: AutoReconForDens3R/scripts/datasets/process_neuralrgbd_to_sdfstudio.py
function alphanum_key (line 31) | def alphanum_key(s):
function load_poses (line 38) | def load_poses(posefile):
FILE: AutoReconForDens3R/scripts/datasets/process_nuscenes_masks.py
class ProcessNuScenesMasks (line 17) | class ProcessNuScenesMasks:
method main (line 41) | def main(self) -> None:
function entrypoint (line 124) | def entrypoint():
FILE: AutoReconForDens3R/scripts/docs/add_nb_tags.py
function main (line 13) | def main(check: bool = False):
FILE: AutoReconForDens3R/scripts/docs/build_docs.py
function run_command (line 15) | def run_command(command: str) -> None:
function main (line 27) | def main(clean_cache: bool = False):
FILE: AutoReconForDens3R/scripts/eval.py
class ComputePSNR (line 20) | class ComputePSNR:
method main (line 28) | def main(self) -> None:
function entrypoint (line 46) | def entrypoint():
FILE: AutoReconForDens3R/scripts/eval_mask.py
function evenly_sample_elems (line 20) | def evenly_sample_elems(elems, n_samples):
function read_mask (line 31) | def read_mask(mask_path):
function resize_mask (line 40) | def resize_mask(gt_mask, pred_mask, max_side_length=None):
function mask_to_boundary (line 69) | def mask_to_boundary(mask, dilation_ratio=0.02):
function boundary_iou (line 91) | def boundary_iou(gt, dt, dilation_ratio=0.02, vis_path=None, img_path=No...
function mask_iou (line 107) | def mask_iou(gt, dt, vis_path=None, img_path=None):
class EvalMask (line 136) | class EvalMask:
method main (line 147) | def main(self) -> None:
method parse_filenames (line 180) | def parse_filenames(self, gt_filenames, pred_filenames):
method eval_frame (line 194) | def eval_frame(self, fn):
method gather_results (line 229) | def gather_results(self, all_metrics):
function entrypoint (line 255) | def entrypoint():
FILE: AutoReconForDens3R/scripts/exporter.py
class Exporter (line 32) | class Exporter:
class ExportPointCloud (line 42) | class ExportPointCloud(Exporter):
method main (line 66) | def main(self) -> None:
class ExportTSDFMesh (line 100) | class ExportTSDFMesh(Exporter):
method main (line 141) | def main(self) -> None:
method _config_override_fn (line 194) | def _config_override_fn(self, config):
class ExportPoissonMesh (line 207) | class ExportPoissonMesh(Exporter):
method validate_pipeline (line 252) | def validate_pipeline(self, pipeline: Pipeline) -> None:
method main (line 277) | def main(self) -> None:
method _config_override_fn (line 354) | def _config_override_fn(self, config):
class ExportMarchingCubesMesh (line 361) | class ExportMarchingCubesMesh(Exporter):
method main (line 367) | def main(self) -> None:
function entrypoint (line 380) | def entrypoint():
FILE: AutoReconForDens3R/scripts/extract_mesh.py
class ExtractMesh (line 36) | class ExtractMesh:
method main (line 100) | def main(self) -> None:
method extract_sdf (line 159) | def extract_sdf(self, pipeline, x):
method _remove_internal_geometry (line 166) | def _remove_internal_geometry(self, output_path: Path) -> Path:
method _remove_internal_geometry_igl (line 176) | def _remove_internal_geometry_igl(
method _remove_internal_geometry_meshlab (line 192) | def _remove_internal_geometry_meshlab(self, output_path: Path) -> Path:
method _remove_isolated_components (line 206) | def _remove_isolated_components(self, output_path: Path) -> Path:
method _fix_mesh (line 230) | def _fix_mesh(self, output_path: Path) -> Path:
method _extract_texture (line 253) | def _extract_texture(self, pipeline, mesh_path: Path) -> Path:
method _config_override_fn (line 268) | def _config_override_fn(self, config): # TODO: make optional
function entrypoint (line 288) | def entrypoint():
FILE: AutoReconForDens3R/scripts/extract_volume.py
class ExtractVolume (line 24) | class ExtractVolume:
method main (line 46) | def main(self) -> None:
method _config_override_fn (line 78) | def _config_override_fn(self, config):
method extract_seg_volume (line 87) | def extract_seg_volume(self, pipeline, volume_extractor):
method extract_sdf_volume (line 91) | def extract_sdf_volume(self, pipeline, volume_extractor):
function extract_volume (line 103) | def extract_volume(
function postprocess_volume_for_mrc (line 156) | def postprocess_volume_for_mrc(volume, volume_type):
function compute_volume_and_pad_val (line 167) | def compute_volume_and_pad_val(volume: np.ndarray, volume_type: str):
function entrypoint (line 182) | def entrypoint():
FILE: AutoReconForDens3R/scripts/github/run_actions.py
function run_command (line 16) | def run_command(command: str, continue_on_fail: bool = False) -> bool:
function run_github_actions_file (line 31) | def run_github_actions_file(filename: str, continue_on_fail: bool = False):
function run_code_checks (line 77) | def run_code_checks(continue_on_fail: bool = False):
function entrypoint (line 89) | def entrypoint():
FILE: AutoReconForDens3R/scripts/heritage_to_nerfstudio.py
class CameraModel (line 24) | class CameraModel(Enum):
function show_result (line 42) | def show_result(seg):
class Renderer (line 52) | class Renderer:
method __init__ (line 53) | def __init__(self, height=480, width=640):
method __call__ (line 58) | def __call__(self, height, width, intrinsics, pose, mesh):
method fix_pose (line 70) | def fix_pose(self, pose):
method mesh_opengl (line 80) | def mesh_opengl(self, mesh):
method delete (line 83) | def delete(self):
function colmap_to_json (line 87) | def colmap_to_json(
FILE: AutoReconForDens3R/scripts/preprocess/preprocess_neus_pose.py
class PreprocessNeusPose (line 22) | class PreprocessNeusPose:
method main (line 28) | def main(self):
method load_camera_dict (line 46) | def load_camera_dict(self):
method renormalize_scene (line 55) | def renormalize_scene(self, camera_dict):
method _parse_images_from_camera_dict (line 63) | def _parse_images_from_camera_dict(self):
FILE: AutoReconForDens3R/scripts/process_data.py
class ProcessImages (line 33) | class ProcessImages:
method main (line 83) | def main(self) -> None:
class ProcessVideo (line 156) | class ProcessVideo:
method main (line 209) | def main(self) -> None:
class ProcessInsta360 (line 281) | class ProcessInsta360:
method main (line 316) | def main(self) -> None:
class ProcessRecord3D (line 400) | class ProcessRecord3D:
method main (line 423) | def main(self) -> None:
class ProcessPolycam (line 477) | class ProcessPolycam:
method main (line 511) | def main(self) -> None:
class ProcessMetashape (line 616) | class ProcessMetashape:
method main (line 643) | def main(self) -> None:
function entrypoint (line 723) | def entrypoint():
FILE: AutoReconForDens3R/scripts/render.py
function _save_gt_video (line 44) | def _save_gt_video(
function _render_trajectory_video (line 78) | def _render_trajectory_video(
function save_frame (line 157) | def save_frame(
function save_video (line 183) | def save_video(
class RenderTrajectory (line 213) | class RenderTrajectory:
method main (line 252) | def main(self) -> None:
method _config_override_fn (line 305) | def _config_override_fn(self, config):
function entrypoint (line 319) | def entrypoint():
FILE: AutoReconForDens3R/scripts/render_mesh.py
function _render_trajectory_video (line 33) | def _render_trajectory_video(
class RenderTrajectory (line 150) | class RenderTrajectory:
method main (line 177) | def main(self) -> None:
function entrypoint (line 228) | def entrypoint():
FILE: AutoReconForDens3R/scripts/texture.py
class TextureMesh (line 23) | class TextureMesh:
method main (line 43) | def main(self) -> None:
function entrypoint (line 68) | def entrypoint():
FILE: AutoReconForDens3R/scripts/train.py
function _find_free_port (line 66) | def _find_free_port() -> str:
function _set_random_seed (line 75) | def _set_random_seed(seed) -> None:
function train_loop (line 82) | def train_loop(local_rank: int, world_size: int, config: cfg.Config, glo...
function _distributed_worker (line 96) | def _distributed_worker(
function launch (line 152) | def launch(
function main (line 223) | def main(config: cfg.Config) -> None:
function entrypoint (line 250) | def entrypoint():
FILE: AutoReconForDens3R/scripts/viewer/view_dataset.py
function main (line 27) | def main(
FILE: AutoReconForDens3R/tests/cameras/test_cameras.py
function test_pinhole_camera (line 110) | def test_pinhole_camera():
function test_equirectangular_camera (line 124) | def test_equirectangular_camera():
function test_camera_as_tensordataclass (line 162) | def test_camera_as_tensordataclass():
function check_generate_rays_shape (line 204) | def check_generate_rays_shape():
function _check_dataclass_allclose (line 281) | def _check_dataclass_allclose(ipt, other):
function _check_cam_shapes (line 288) | def _check_cam_shapes(cam: Cameras, _batch_size):
FILE: AutoReconForDens3R/tests/cameras/test_rays.py
function test_frustum_get_position (line 11) | def test_frustum_get_position():
function test_frustum_get_gaussian_blob (line 33) | def test_frustum_get_gaussian_blob():
function test_frustum_apply_masks (line 49) | def test_frustum_apply_masks():
function test_get_mock_frustum (line 69) | def test_get_mock_frustum():
FILE: AutoReconForDens3R/tests/field_components/test_embedding.py
function test_indexing (line 7) | def test_indexing():
FILE: AutoReconForDens3R/tests/field_components/test_encodings.py
function test_scaling_and_offset (line 10) | def test_scaling_and_offset():
function test_nerf_encoder (line 28) | def test_nerf_encoder():
function test_rff_encoder (line 57) | def test_rff_encoder():
function test_tensor_vm_encoder (line 77) | def test_tensor_vm_encoder():
function test_tensor_cp_encoder (line 101) | def test_tensor_cp_encoder():
function test_tensor_sh_encoder (line 124) | def test_tensor_sh_encoder():
function test_tensor_hash_encoder (line 142) | def test_tensor_hash_encoder():
FILE: AutoReconForDens3R/tests/field_components/test_field_outputs.py
function test_field_output (line 17) | def test_field_output():
function test_density_output (line 36) | def test_density_output():
function test_rgb_output (line 46) | def test_rgb_output():
function test_sh_output (line 56) | def test_sh_output():
FILE: AutoReconForDens3R/tests/field_components/test_fields.py
function test_tcnn_instant_ngp_field (line 10) | def test_tcnn_instant_ngp_field():
FILE: AutoReconForDens3R/tests/field_components/test_mlp.py
function test_mlp (line 10) | def test_mlp():
FILE: AutoReconForDens3R/tests/field_components/test_temporal_distortions.py
function test_dnerf_distortion (line 9) | def test_dnerf_distortion():
FILE: AutoReconForDens3R/tests/model_components/test_ray_sampler.py
function test_uniform_sampler (line 18) | def test_uniform_sampler():
function test_lin_disp_sampler (line 37) | def test_lin_disp_sampler():
function test_sqrt_sampler (line 54) | def test_sqrt_sampler():
function test_log_sampler (line 71) | def test_log_sampler():
function test_pdf_sampler (line 88) | def test_pdf_sampler():
FILE: AutoReconForDens3R/tests/model_components/test_renderers.py
function test_rgb_renderer (line 11) | def test_rgb_renderer():
function test_sh_renderer (line 28) | def test_sh_renderer():
function test_acc_renderer (line 46) | def test_acc_renderer():
function test_depth_renderer (line 59) | def test_depth_renderer():
FILE: AutoReconForDens3R/tests/test_train.py
function set_reduced_config (line 20) | def set_reduced_config(config: Config):
function test_train (line 49) | def test_train():
FILE: AutoReconForDens3R/tests/utils/test_poses.py
function test_to4x4 (line 10) | def test_to4x4():
function test_multiply (line 24) | def test_multiply():
function test_inverse (line 60) | def test_inverse():
function test_normalize (line 84) | def test_normalize():
FILE: AutoReconForDens3R/tests/utils/test_tensor_dataclass.py
class DummyNestedClass (line 14) | class DummyNestedClass(TensorDataclass):
class DummyTensorDataclass (line 21) | class DummyTensorDataclass(TensorDataclass):
function test_init (line 30) | def test_init():
function test_broadcasting (line 44) | def test_broadcasting():
function test_tensor_ops (line 64) | def test_tensor_ops(): # pylint: disable=(too-many-statements)
function test_nested_class (line 136) | def test_nested_class():
function test_iter (line 169) | def test_iter():
FILE: AutoReconForDens3R/tests/utils/test_visualization.py
function test_apply_colormap (line 9) | def test_apply_colormap():
function test_apply_depth_colormap (line 19) | def test_apply_depth_colormap():
function test_apply_boolean_colormap (line 31) | def test_apply_boolean_colormap():
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/cli/inference_transformer.py
class Config (line 34) | class Config:
function _update_config (line 61) | def _update_config(cfg):
function main (line 68) | def main(config: Config):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/decomp/preprocess.py
class SfMPreprocessConfig (line 33) | class SfMPreprocessConfig:
class SfMPreprocessWorker (line 76) | class SfMPreprocessWorker:
method __init__ (line 77) | def __init__(self, cfg: DictConfig, inst_rel_dir: str, device: str = "...
method _init_paths (line 91) | def _init_paths(self, inst_rel_dir: str):
method _handle_resume (line 124) | def _handle_resume(self) -> bool:
method _sanity_check (line 133) | def _sanity_check(self) -> None:
method update_thresholds (line 154) | def update_thresholds(self):
method __call__ (line 178) | def __call__(self) -> Optional[Dict[str, Any]]:
method parse_poses (line 205) | def parse_poses(self, data):
method build_neural_ptcd (line 208) | def build_neural_ptcd(self, data):
method stat_outlier_removal (line 226) | def stat_outlier_removal(self, data):
method voxel_downsample (line 232) | def voxel_downsample(self, data):
method filter_pts_behind_cameras (line 235) | def filter_pts_behind_cameras(self, data):
method filter_ground_plane_pts (line 238) | def filter_ground_plane_pts(self, data):
method save_transformer_inference (line 241) | def save_transformer_inference(self):
method save_ncut_inference (line 247) | def save_ncut_inference(self):
function ray_wrapper (line 255) | def ray_wrapper(cfg, task_id=None, **kwargs) -> Tuple[bool, str, Optiona...
class SfMPreprocess (line 272) | class SfMPreprocess:
method __init__ (line 273) | def __init__(self, cfg: DictConfig):
method __call__ (line 280) | def __call__(self) -> Optional[Dict[str, Any]]:
method setup_logger (line 312) | def setup_logger(self):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/decomp/transformer/dataset/utils.py
function normalize_pointcloud (line 15) | def normalize_pointcloud(data):
function to_numpy (line 43) | def to_numpy(obj):
function move_to (line 67) | def move_to(obj, device):
function build_inference_batch (line 86) | def build_inference_batch(
function compute_average_camera_position (line 124) | def compute_average_camera_position(poses: Dict[str, Dict[str, np.ndarra...
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/decomp/transformer/lightning/module.py
function if_none_then_true (line 29) | def if_none_then_true(x):
class SfMTransformerConfig (line 34) | class SfMTransformerConfig:
class SfMTransformer (line 63) | class SfMTransformer(pl.LightningModule):
method __init__ (line 64) | def __init__(self, cfg: SfMTransformerConfig):
method forward (line 73) | def forward(self, data):
method inference (line 79) | def inference(self, batch):
method test_val_step (line 82) | def test_val_step(self, batch, batch_idx, mode="test"):
method validation_step (line 106) | def validation_step(self, batch, batch_idx):
method test_step (line 109) | def test_step(self, batch, batch_idx):
method build_aux_attrs (line 112) | def build_aux_attrs(self, batch, mode: Literal["train", "test", "val"]):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/decomp/transformer/modeling/encoding.py
function expected_sin (line 27) | def expected_sin(x_means: torch.Tensor, x_vars: torch.Tensor) -> torch.T...
class FieldComponent (line 39) | class FieldComponent(nn.Module):
method __init__ (line 46) | def __init__(self, in_dim: Optional[int] = None, out_dim: Optional[int...
method build_nn_modules (line 51) | def build_nn_modules(self) -> None:
method set_in_dim (line 55) | def set_in_dim(self, in_dim: int) -> None:
method get_out_dim (line 64) | def get_out_dim(self) -> int:
method forward (line 71) | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> Ten...
class Encoding (line 80) | class Encoding(FieldComponent):
method __init__ (line 87) | def __init__(self, in_dim: int) -> None:
method forward (line 93) | def forward(self, in_tensor: TensorType["bs":..., "input_dim"]) -> Ten...
class NeRFEncoding (line 102) | class NeRFEncoding(Encoding):
method __init__ (line 114) | def __init__(
method get_out_dim (line 124) | def get_out_dim(self) -> int:
method forward (line 132) | def forward(
class RFFEncoding (line 165) | class RFFEncoding(Encoding):
method __init__ (line 175) | def __init__(self, in_dim: int, num_frequencies: int, scale: float, in...
method get_out_dim (line 188) | def get_out_dim(self) -> int:
method forward (line 191) | def forward(
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/decomp/transformer/modeling/point_transformer.py
class PointTransformer (line 14) | class PointTransformer(nn.Module):
method __init__ (line 15) | def __init__(
method _init_weights (line 124) | def _init_weights(self, m):
method interpolate_pos_encoding (line 133) | def interpolate_pos_encoding(self, x: Tensor):
method prepare_tokens (line 157) | def prepare_tokens(self, xyz: Tensor, feat: Tensor, cls_token: Optiona...
method forward (line 190) | def forward(self, xyz: Tensor, feat: Tensor, cls_token: Optional[Tenso...
class VoxelEmbedding (line 198) | class VoxelEmbedding(nn.Module):
method __init__ (line 199) | def __init__(self, voxel_resolution=128, in_chans=384, embed_dim=384):
method forward (line 205) | def forward(self, xyz: Tensor, feat: Tensor):
class PosFeatMerger (line 211) | class PosFeatMerger(nn.Module):
method __init__ (line 212) | def __init__(self, pos_dim, feat_dim, hidden_dim, out_dim, act_layer=n...
method forward (line 218) | def forward(self, pos, feat):
class TokenSegHead (line 222) | class TokenSegHead(nn.Module):
method __init__ (line 223) | def __init__(self, use_softmax=False, temperature=1.0, num_classes=2):
method forward (line 229) | def forward(self, tokens: Tensor, cls_tokens: Tensor) -> Tensor:
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/decomp/transformer/modeling/utils.py
function _no_grad_trunc_normal_ (line 9) | def _no_grad_trunc_normal_(tensor, mean, std, a, b):
function trunc_normal_ (line 46) | def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
function elu_feature_map (line 51) | def elu_feature_map(x):
class LinearAttention (line 55) | class LinearAttention(nn.Module):
method __init__ (line 56) | def __init__(self, eps=1e-6):
method forward (line 61) | def forward(self, queries, keys, values):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/decomp/transformer/modeling/vision_transformer.py
function drop_path (line 27) | def drop_path(x, drop_prob: float = 0.0, training: bool = False):
class DropPath (line 38) | class DropPath(nn.Module):
method __init__ (line 41) | def __init__(self, drop_prob=None):
method forward (line 45) | def forward(self, x):
class Mlp (line 49) | class Mlp(nn.Module):
method __init__ (line 50) | def __init__(self, in_features, hidden_features=None, out_features=Non...
method forward (line 59) | def forward(self, x):
class Attention (line 68) | class Attention(nn.Module):
method __init__ (line 69) | def __init__(
method forward (line 86) | def forward(self, x):
class Block (line 105) | class Block(nn.Module):
method __init__ (line 106) | def __init__(
method forward (line 136) | def forward(self, x, return_attention=False):
class PatchEmbed (line 145) | class PatchEmbed(nn.Module):
method __init__ (line 148) | def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=...
method forward (line 157) | def forward(self, x):
class VisionTransformer (line 163) | class VisionTransformer(nn.Module):
method __init__ (line 166) | def __init__(
method _init_weights (line 222) | def _init_weights(self, m):
method interpolate_pos_encoding (line 231) | def interpolate_pos_encoding(self, x, w, h):
method prepare_tokens (line 253) | def prepare_tokens(self, x):
method forward (line 266) | def forward(self, x):
method get_last_selfattention (line 273) | def get_last_selfattention(self, x):
method get_intermediate_layers (line 282) | def get_intermediate_layers(self, x, n=1):
function vit_tiny (line 293) | def vit_tiny(patch_size=16, **kwargs):
function vit_small (line 307) | def vit_small(patch_size=16, **kwargs):
function vit_base (line 321) | def vit_base(patch_size=16, **kwargs):
class DINOHead (line 335) | class DINOHead(nn.Module):
method __init__ (line 336) | def __init__(
method _init_weights (line 361) | def _init_weights(self, m):
method forward (line 367) | def forward(self, x):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/decomp/transformer/utils/postprocess.py
function _to_numpy (line 35) | def _to_numpy(x: Union[np.ndarray, Tensor]) -> Tuple[np.ndarray, Any]:
function _to_tensor (line 45) | def _to_tensor(x: Union[np.ndarray, Tensor], device: str) -> Tensor:
function extract_max_cluster (line 49) | def extract_max_cluster(
function remove_outliers (line 73) | def remove_outliers(
function segment_fg_supporting_plane (line 97) | def segment_fg_supporting_plane(
function extract_object_centric_fg_aabb (line 220) | def extract_object_centric_fg_aabb(
class PostprocessConfig (line 264) | class PostprocessConfig:
class Postprocess (line 318) | class Postprocess:
method __init__ (line 319) | def __init__(self, cfg: PostprocessConfig):
method __call__ (line 325) | def __call__(
method run_euclidean_clustering (line 401) | def run_euclidean_clustering(self):
method run_outlier_removal (line 424) | def run_outlier_removal(self):
method run_plane_alignment (line 447) | def run_plane_alignment(self):
method _segment_support_plane (line 460) | def _segment_support_plane(self):
method _align_world_with_plane (line 474) | def _align_world_with_plane(self):
method compute_object_centric_transformation (line 492) | def compute_object_centric_transformation(self):
method build_fg_aabb (line 517) | def build_fg_aabb(self):
method _extract_object_centric_fg_aabb (line 540) | def _extract_object_centric_fg_aabb(self) -> Float[Tensor, "batch 8 3"]:
method extract_bbox_no_postprocess (line 557) | def extract_bbox_no_postprocess(
method compute_full_decomposition (line 571) | def compute_full_decomposition(self) -> Dict[str, Dict[str, Any]]:
method set_bg_in_bbox_to_fg (line 607) | def set_bg_in_bbox_to_fg(self, fg_pts, bg_pts, fg_aabb_min_max_info):
method _build_decomposed_pts (line 629) | def _build_decomposed_pts(self, b_id: int, decomp_results: Dict[str, A...
method _build_decomposed_aux_attrs (line 687) | def _build_decomposed_aux_attrs(self, b_id: int, decomp_results: Dict[...
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/decomp/transformer/utils/saving.py
class SfMTransformerSaveConfig (line 17) | class SfMTransformerSaveConfig:
class SaveResults (line 41) | class SaveResults:
method __init__ (line 42) | def __init__(self, cfg: SfMTransformerSaveConfig):
method _sanity_check (line 46) | def _sanity_check(self):
method save_annotations (line 58) | def save_annotations(self, batch: Dict[str, Any], all_decomp_results: ...
method _compute_transform_new_object_frame (line 106) | def _compute_transform_new_object_frame(
method _build_object_anno (line 128) | def _build_object_anno(
method _build_pose_anno (line 160) | def _build_pose_anno(
method visualize_object_anno (line 197) | def visualize_object_anno(self, object_anno: Dict[str, Any], save_path...
method save_decomp_visualization (line 230) | def save_decomp_visualization(self):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/feature_extraction/dino_vit/extract_features.py
class DINOExtractionConfig (line 26) | class DINOExtractionConfig:
method __post_init__ (line 50) | def __post_init__(self):
function resize_max_area (line 69) | def resize_max_area(h, w, max_area):
function get_sfm_dir (line 77) | def get_sfm_dir(args, inst_rel_dir):
function _save_feat_dirname_to_cache (line 91) | def _save_feat_dirname_to_cache(args, inst_rel_dir, feat_dirname):
function extract_single (line 103) | def extract_single(args, inst_rel_dir):
function ray_wrapper (line 168) | def ray_wrapper(*args, worker=None, task_id=None, **kwargs):
function main (line 181) | def main(dino_extraction: DictConfig):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/feature_extraction/dino_vit/vit_extractor.py
class ViTExtractor (line 25) | class ViTExtractor:
method __init__ (line 37) | def __init__(
method create_model (line 74) | def create_model(model_type: str) -> nn.Module:
method _fix_pos_enc (line 99) | def _fix_pos_enc(patch_size: int, stride_hw: Tuple[int, int]):
method patch_vit_resolution (line 139) | def patch_vit_resolution(model: nn.Module, stride: int) -> nn.Module:
method preprocess (line 161) | def preprocess(
method _get_hook (line 179) | def _get_hook(self, facet: str):
method _register_hooks (line 206) | def _register_hooks(self, layers: List[int], facet: str) -> None:
method _unregister_hooks (line 223) | def _unregister_hooks(self) -> None:
method _extract_features (line 231) | def _extract_features(
method _log_bin (line 259) | def _log_bin(self, x: torch.Tensor, hierarchy: int = 2) -> torch.Tensor:
method extract_descriptors (line 308) | def extract_descriptors(
method extract_saliency_maps (line 369) | def extract_saliency_maps(self, batch: torch.Tensor) -> torch.Tensor:
method reshape_descriptor (line 390) | def reshape_descriptor(self, descriptor: torch.Tensor, bin: bool = Fal...
function extract_single (line 402) | def extract_single(args):
function str2bool (line 454) | def str2bool(v):
function parse_args (line 465) | def parse_args():
function main (line 512) | def main():
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/sfm/colmap_from_idr.py
class IDR2COLMAPConfig (line 20) | class IDR2COLMAPConfig:
function load_K_Rt_from_P (line 34) | def load_K_Rt_from_P(filename, P=None):
function parse_idr_poses (line 58) | def parse_idr_poses(pose_path, image_dir) -> Dict[str, Dict[str, np.ndar...
function process_instance (line 78) | def process_instance(cfg, inst_dirname):
function main (line 115) | def main(idr2colmap: IDR2COLMAPConfig):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/sfm/pairs_from_sequential.py
function read_image_pairs (line 19) | def read_image_pairs(path):
function pairs_from_file (line 25) | def pairs_from_file(image_list: List[Path], pairs_path: Path) -> List[Tu...
function build_loop_detection_pairs (line 38) | def build_loop_detection_pairs(
function main (line 74) | def main(
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/sfm/sfm.py
class SfMConfig (line 37) | class SfMConfig:
class SparseReconActor (line 160) | class SparseReconActor:
method __init__ (line 161) | def __init__(self, task_queue, return_queue, share_intrinsics=True, ma...
method run (line 168) | def run(self):
method _run (line 176) | def _run(self, task_inst_id):
method sparse_recon (line 207) | def sparse_recon(self, task):
method triangulate (line 225) | def triangulate(self, task):
method manhattan_alignment (line 244) | def manhattan_alignment(self, task):
function extract_and_match (line 277) | def extract_and_match(task): # FeatureTask
class FeatureActor (line 288) | class FeatureActor:
method __init__ (line 289) | def __init__(self, feature_queue: Queue, recon_queue: Queue):
method run (line 293) | def run(self):
method _run_task (line 305) | def _run_task(self, task):
method _extract_and_match (line 315) | def _extract_and_match(self, task): # FeatureTask
method _run_loftr (line 322) | def _run_loftr(self, task): # FeatureTask
function read_write_cache (line 340) | def read_write_cache(
function evenly_sample_images (line 374) | def evenly_sample_images(images, n_samples):
function parse_images_from_reference_model (line 386) | def parse_images_from_reference_model(sfm_ref_dir):
function plot_reconstruction (line 393) | def plot_reconstruction(rec, outputs_dir, show_axes=False, suffix=None):
function manhattan_alignment (line 405) | def manhattan_alignment(image_dir, sfm_dir, output_dir):
function reconstruct_instance (line 423) | def reconstruct_instance(
function reconstruct_instance_wrapper (line 552) | def reconstruct_instance_wrapper(cli_args, *args, **kwargs):
function main (line 571) | def main(sfm: SfMConfig):
function dens3r_recon (line 636) | def dens3r_recon(sfm: SfMConfig):
function postprocess_dens3r (line 694) | def postprocess_dens3r(sfm: SfMConfig, sfm_dir_name: str):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/aggregation.py
function multiview_feature_extraction (line 13) | def multiview_feature_extraction(
function multiview_feature_aggregation (line 97) | def multiview_feature_aggregation(p3Ds_xyz, p3Ds_mv_feats, mode="mean", ...
function gini (line 130) | def gini(array, eps=1e-7):
function gini_feature_aggregation (line 153) | def gini_feature_aggregation(mv_feats, mode="max"):
function affinity_feature_aggregation (line 166) | def affinity_feature_aggregation(mv_feats, mode):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/cli.py
function str2bool (line 4) | def str2bool(v):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/colmap.py
function read_extrinsics (line 5) | def read_extrinsics(rec: pycolmap.Reconstruction):
function parse_poses (line 16) | def parse_poses(rec):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/geometry/box3d.py
function min_max_to_vertices_torch (line 33) | def min_max_to_vertices_torch(box: Tensor) -> Tensor:
function min_max_to_vertices (line 48) | def min_max_to_vertices(box):
function _vertices_to_min_max (line 61) | def _vertices_to_min_max(box):
function _center_extent_to_vertices (line 66) | def _center_extent_to_vertices(box): # extent:=half_extent
function _vertices_to_center_extent (line 71) | def _vertices_to_center_extent(box):
function _center_extent_to_min_max (line 78) | def _center_extent_to_min_max(box):
function convert_box_format (line 95) | def convert_box_format(box, src_format, tgt_format):
function compute_box3d_iou (line 109) | def compute_box3d_iou(
function normalize (line 131) | def normalize(array: np.ndarray, axis: int = -1):
function extend_aabb (line 137) | def extend_aabb(
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/geometry/pointcloud/convert.py
function open3d_ptcd_from_numpy (line 7) | def open3d_ptcd_from_numpy(
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/geometry/pointcloud/misc.py
function compute_knn_distance_threshold (line 13) | def compute_knn_distance_threshold(
function compute_world_to_object_transformation (line 35) | def compute_world_to_object_transformation(
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/geometry/pointcloud/plane.py
function fit_plane (line 18) | def fit_plane(points: np.ndarray, method: Literal["OLS", "TLS"] = "OLS",...
function fit_plane_ols (line 30) | def fit_plane_ols(points: np.ndarray):
function fit_plane_tls (line 46) | def fit_plane_tls(points: np.ndarray):
function fit_plane_tls_torch (line 59) | def fit_plane_tls_torch(points: np.ndarray, device="cuda", max_n_points=...
function point_to_plane_dist (line 86) | def point_to_plane_dist(points: Float[np.ndarray, "N 3"], plane: Float[n...
function ground_plane_alignment (line 94) | def ground_plane_alignment(
function ground_plane_alignment_simple (line 152) | def ground_plane_alignment_simple(plane: Plane, source_vector: np.ndarra...
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/geometry/pointcloud/pointcloud.py
function build_color_ptcd_from_colmap (line 26) | def build_color_ptcd_from_colmap(
function build_feature_ptcd (line 53) | def build_feature_ptcd(
class NeuralPointCloud (line 128) | class NeuralPointCloud:
method __init__ (line 131) | def __init__(
method from_pycolmap_rec (line 158) | def from_pycolmap_rec(
method statistical_outlier_removal (line 194) | def statistical_outlier_removal(self, n_neighbors, std_ratio):
method to_open3d_ptcd (line 199) | def to_open3d_ptcd(self): # points only, no attributes
method update_ptcd_with_indices (line 202) | def update_ptcd_with_indices(self, inds):
method update_ptcd_with_mask (line 208) | def update_ptcd_with_mask(self):
method visualize_ptcd (line 211) | def visualize_ptcd(self, base_name):
method _draw_color_ptcd (line 216) | def _draw_color_ptcd(self, base_name):
method _draw_feature_ptcd (line 225) | def _draw_feature_ptcd(self, base_name):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/geometry/pointcloud/segmentation.py
function euclidean_clustering (line 17) | def euclidean_clustering(
function segment_planes (line 65) | def segment_planes(
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/geometry/transform.py
function rotmat (line 4) | def rotmat(a, b):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/misc.py
class EarlyTermination (line 2) | class EarlyTermination(Exception):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/ray.py
function is_ray_environment (line 4) | def is_ray_environment():
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/tqdm.py
function tqdm (line 7) | def tqdm(*args, **kwargs):
FILE: AutoReconForDens3R/third_party/AutoDecomp/auto_decomp/utils/viz_3d.py
function to_homogeneous (line 19) | def to_homogeneous(points):
function init_figure (line 24) | def init_figure(
function plot_points (line 65) | def plot_points(
function plot_mesh (line 98) | def plot_mesh(
function plot_cube (line 124) | def plot_cube(
function plot_sphere (line 185) | def plot_sphere(
function plot_camera (line 216) | def plot_camera(
function plot_camera_colmap (line 281) | def plot_camera_colmap(
function plot_cameras (line 295) | def plot_cameras(fig: go.Figure, reconstruction: pycolmap.Reconstruction...
function plot_coordinate_frames (line 301) | def plot_coordinate_frames(
function plot_reconstruction (line 344) | def plot_reconstruction(
function save_fig (line 382) | def save_fig(fig, save_dir: Path, fig_name: str, mode: Union[str, Plotly...
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/colmap_from_nvm.py
function recover_database_images_and_ids (line 13) | def recover_database_images_and_ids(database_path):
function quaternion_to_rotation_matrix (line 27) | def quaternion_to_rotation_matrix(qvec):
function camera_center_to_translation (line 37) | def camera_center_to_translation(c, qvec):
function read_nvm_model (line 42) | def read_nvm_model(
function main (line 165) | def main(nvm, intrinsics, database, output, skip_points=False):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/extract_features.py
function resize_image (line 145) | def resize_image(image, size, interp):
class ImageDataset (line 163) | class ImageDataset(torch.utils.data.Dataset):
method __init__ (line 172) | def __init__(self, root, conf, paths=None):
method __getitem__ (line 199) | def __getitem__(self, idx):
method __len__ (line 223) | def __len__(self):
function main (line 228) | def main(conf: Dict,
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/extractors/cosplace.py
class CosPlace (line 23) | class CosPlace(BaseModel):
method _init (line 29) | def _init(self, conf):
method _forward (line 41) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/extractors/d2net.py
class D2Net (line 14) | class D2Net(BaseModel):
method _init (line 23) | def _init(self, conf):
method _forward (line 36) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/extractors/dir.py
class DIR (line 25) | class DIR(BaseModel):
method _init (line 43) | def _init(self, conf):
method _forward (line 59) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/extractors/disk.py
class DISK (line 14) | class DISK(BaseModel):
method _init (line 24) | def _init(self, conf):
method _forward (line 51) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/extractors/dog.py
function sift_to_rootsift (line 14) | def sift_to_rootsift(x):
class DoG (line 21) | class DoG(BaseModel):
method _init (line 36) | def _init(self, conf):
method to (line 47) | def to(self, *args, **kwargs):
method _forward (line 57) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/extractors/netvlad.py
class NetVLADLayer (line 18) | class NetVLADLayer(nn.Module):
method __init__ (line 19) | def __init__(self, input_dim=512, K=64, score_bias=False, intranorm=Tr...
method forward (line 29) | def forward(self, x):
class NetVLAD (line 43) | class NetVLAD(BaseModel):
method _init (line 57) | def _init(self, conf):
method _forward (line 129) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/extractors/openibl.py
class OpenIBL (line 7) | class OpenIBL(BaseModel):
method _init (line 13) | def _init(self, conf):
method _forward (line 20) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/extractors/r2d2.py
class R2D2 (line 12) | class R2D2(BaseModel):
method _init (line 26) | def _init(self, conf):
method _forward (line 36) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/extractors/superpoint.py
function sample_descriptors_fix_sampling (line 13) | def sample_descriptors_fix_sampling(keypoints, descriptors, s: int = 8):
class SuperPoint (line 26) | class SuperPoint(BaseModel):
method _init (line 37) | def _init(self, conf):
method _forward (line 42) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/localize_inloc.py
function interpolate_scan (line 16) | def interpolate_scan(scan, kp):
function get_scan_pose (line 38) | def get_scan_pose(dataset_dir, rpath):
function pose_from_cluster (line 61) | def pose_from_cluster(dataset_dir, q, retrieved, feature_file, match_file,
function main (line 114) | def main(dataset_dir, retrieval, features, matches, results,
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/localize_sfm.py
function do_covisibility_clustering (line 15) | def do_covisibility_clustering(frame_ids: List[int],
class QueryLocalizer (line 51) | class QueryLocalizer:
method __init__ (line 52) | def __init__(self, reconstruction, config=None):
method localize (line 56) | def localize(self, points2D_all, points2D_idxs, points3D_id, query_cam...
function pose_from_cluster (line 67) | def pose_from_cluster(
function main (line 126) | def main(reference_sfm: Union[Path, pycolmap.Reconstruction],
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/match_dense.py
function to_cpts (line 88) | def to_cpts(kpts, ps):
function assign_keypoints (line 94) | def assign_keypoints(kpts: np.ndarray,
function get_grouped_ids (line 132) | def get_grouped_ids(array):
function get_unique_matches (line 143) | def get_unique_matches(match_ids, scores):
function matches_to_matches0 (line 155) | def matches_to_matches0(matches, scores):
function kpids_to_matches0 (line 166) | def kpids_to_matches0(kpt_ids0, kpt_ids1, scores):
function scale_keypoints (line 177) | def scale_keypoints(kpts, scale):
class ImagePairDataset (line 183) | class ImagePairDataset(torch.utils.data.Dataset):
method __init__ (line 191) | def __init__(self, image_dir, conf, pairs):
method preprocess (line 205) | def preprocess(self, image: np.ndarray):
method __len__ (line 232) | def __len__(self):
method __getitem__ (line 235) | def __getitem__(self, idx):
function match_dense (line 249) | def match_dense(conf: Dict,
function load_keypoints (line 305) | def load_keypoints(conf: Dict,
function aggregate_matches (line 341) | def aggregate_matches(
function assign_matches (line 435) | def assign_matches(
function match_and_assign (line 465) | def match_and_assign(conf: Dict,
function main (line 524) | def main(conf: Dict,
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/match_features.py
class WorkQueue (line 72) | class WorkQueue():
method __init__ (line 73) | def __init__(self, work_fn, num_threads=1):
method join (line 82) | def join(self):
method thread_fn (line 88) | def thread_fn(self, work_fn):
method put (line 94) | def put(self, data):
class FeaturePairsDataset (line 98) | class FeaturePairsDataset(torch.utils.data.Dataset):
method __init__ (line 99) | def __init__(self, pairs, feature_path_q, feature_path_r):
method __getitem__ (line 104) | def __getitem__(self, idx):
method __len__ (line 120) | def __len__(self):
function writer_fn (line 124) | def writer_fn(inp, match_path):
function main (line 137) | def main(conf: Dict,
function find_unique_new_pairs (line 165) | def find_unique_new_pairs(pairs_all: List[Tuple[str]], match_path: Path ...
function match_from_paths (line 187) | def match_from_paths(conf: Dict,
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/matchers/__init__.py
function get_matcher (line 1) | def get_matcher(matcher):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/matchers/adalam.py
class AdaLAM (line 9) | class AdaLAM(BaseModel):
method _init (line 31) | def _init(self, conf):
method _forward (line 34) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/matchers/loftr.py
class LoFTR (line 9) | class LoFTR(BaseModel):
method _init (line 20) | def _init(self, conf):
method _forward (line 25) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/matchers/nearest_neighbor.py
function find_nn (line 6) | def find_nn(sim, ratio_thresh, distance_thresh):
function mutual_check (line 19) | def mutual_check(m0, m1):
class NearestNeighbor (line 27) | class NearestNeighbor(BaseModel):
method _init (line 35) | def _init(self, conf):
method _forward (line 38) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/matchers/superglue.py
class SuperGlue (line 10) | class SuperGlue(BaseModel):
method _init (line 21) | def _init(self, conf):
method _forward (line 24) | def _forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pairs_from_covisibility.py
function main (line 11) | def main(model, output, num_matched):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pairs_from_exhaustive.py
function main (line 11) | def main(
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pairs_from_poses.py
function get_pairwise_distances (line 13) | def get_pairwise_distances(images):
function main (line 42) | def main(model, output, num_matched, rotation_threshold=DEFAULT_ROT_THRE...
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pairs_from_retrieval.py
function parse_names (line 15) | def parse_names(prefix, names, names_all):
function get_descriptors (line 36) | def get_descriptors(names, path, name2idx=None, key='global_descriptor'):
function pairs_from_score_matrix (line 48) | def pairs_from_score_matrix(scores: torch.Tensor,
function main (line 70) | def main(descriptors, output, num_matched,
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pipelines/4Seasons/utils.py
function get_timestamps (line 13) | def get_timestamps(files, idx):
function delete_unused_images (line 29) | def delete_unused_images(root, timestamps):
function camera_from_calibration_file (line 41) | def camera_from_calibration_file(id_, path):
function parse_poses (line 56) | def parse_poses(path, colmap=False):
function parse_relocalization (line 76) | def parse_relocalization(path, has_poses=False):
function build_empty_colmap_model (line 96) | def build_empty_colmap_model(root, sfm_dir):
function generate_query_lists (line 131) | def generate_query_lists(timestamps, seq_dir, out_path):
function generate_localization_pairs (line 142) | def generate_localization_pairs(sequence, reloc, num, ref_pairs, out_path):
function prepare_submission (line 174) | def prepare_submission(results, relocs, poses_path, out_dir):
function evaluate_submission (line 204) | def evaluate_submission(submission_dir, relocs, ths=[0.1, 0.2, 0.5]):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pipelines/7Scenes/create_gt_sfm.py
function scene_coordinates (line 11) | def scene_coordinates(p2D, R_w2c, t_w2c, depth, camera):
function interpolate_depth (line 21) | def interpolate_depth(depth, kp):
function image_path_to_rendered_depth_path (line 43) | def image_path_to_rendered_depth_path(image_name):
function project_to_image (line 51) | def project_to_image(p3D, R, t, camera, eps: float = 1e-4, pad: int = 1):
function correct_sfm_with_gt_depth (line 63) | def correct_sfm_with_gt_depth(sfm_path, depth_folder_path, output_path):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pipelines/7Scenes/pipeline.py
function run_scene (line 14) | def run_scene(images, gt_dir, retrieval, outputs, results, num_covis,
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pipelines/7Scenes/utils.py
function create_reference_sfm (line 9) | def create_reference_sfm(full_model, ref_model, blacklist=None, ext='.bi...
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pipelines/CMU/pipeline.py
function generate_query_list (line 10) | def generate_query_list(dataset, path, slice_):
function run_slice (line 29) | def run_slice(slice_, root, outputs, num_covis, num_loc):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pipelines/Cambridge/pipeline.py
function run_scene (line 13) | def run_scene(images, gt_dir, outputs, results, num_covis, num_loc):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pipelines/Cambridge/utils.py
function scale_sfm_images (line 12) | def scale_sfm_images(full_model, scaled_model, image_dir):
function create_query_list_with_intrinsics (line 44) | def create_query_list_with_intrinsics(model, out, list_file=None, ext='....
function evaluate (line 84) | def evaluate(model, results, list_file=None, ext='.bin', only_localized=...
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pipelines/RobotCar/colmap_from_nvm.py
function read_nvm_model (line 17) | def read_nvm_model(
function main (line 137) | def main(nvm, database, output, skip_points=False):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/pipelines/RobotCar/pipeline.py
function generate_query_list (line 13) | def generate_query_list(dataset, image_dir, path):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/reconstruction.py
function create_empty_db (line 15) | def create_empty_db(database_path: Path):
function import_images (line 26) | def import_images(image_dir: Path,
function get_image_ids (line 43) | def get_image_ids(database_path: Path) -> Dict[str, int]:
function run_reconstruction (line 52) | def run_reconstruction(sfm_dir: Path,
function main (line 93) | def main(sfm_dir: Path,
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/triangulation.py
class OutputCapture (line 18) | class OutputCapture:
method __init__ (line 19) | def __init__(self, verbose: bool):
method __enter__ (line 22) | def __enter__(self):
method __exit__ (line 27) | def __exit__(self, exc_type, *args):
function create_db_from_model (line 35) | def create_db_from_model(reconstruction: pycolmap.Reconstruction,
function import_features (line 57) | def import_features(image_ids: Dict[str, int],
function import_matches (line 72) | def import_matches(image_ids: Dict[str, int],
function estimation_and_geometric_verification (line 103) | def estimation_and_geometric_verification(database_path: Path,
function geometric_verification (line 114) | def geometric_verification(image_ids: Dict[str, int],
function run_triangulation (line 181) | def run_triangulation(model_path: Path,
function main (line 200) | def main(sfm_dir: Path,
function parse_option_args (line 239) | def parse_option_args(args: List[str], default_options) -> Dict[str, Any]:
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/utils/base_model.py
class BaseModel (line 8) | class BaseModel(nn.Module, metaclass=ABCMeta):
method __init__ (line 12) | def __init__(self, conf):
method forward (line 20) | def forward(self, data):
method _init (line 27) | def _init(self, conf):
method _forward (line 32) | def _forward(self, data):
function dynamic_load (line 37) | def dynamic_load(root, model):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/utils/database.py
function image_ids_to_pair_id (line 115) | def image_ids_to_pair_id(image_id1, image_id2):
function pair_id_to_image_ids (line 121) | def pair_id_to_image_ids(pair_id):
function array_to_blob (line 127) | def array_to_blob(array):
function blob_to_array (line 134) | def blob_to_array(blob, dtype, shape=(-1,)):
class COLMAPDatabase (line 141) | class COLMAPDatabase(sqlite3.Connection):
method connect (line 144) | def connect(database_path):
method __init__ (line 148) | def __init__(self, *args, **kwargs):
method add_camera (line 166) | def add_camera(self, model, width, height, params,
method add_image (line 175) | def add_image(self, name, camera_id,
method add_keypoints (line 184) | def add_keypoints(self, image_id, keypoints):
method add_descriptors (line 193) | def add_descriptors(self, image_id, descriptors):
method add_matches (line 199) | def add_matches(self, image_id1, image_id2, matches):
method add_two_view_geometry (line 212) | def add_two_view_geometry(self, image_id1, image_id2, matches,
function example_usage (line 236) | def example_usage():
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/utils/geometry.py
function to_homogeneous (line 5) | def to_homogeneous(p):
function vector_to_cross_product_matrix (line 9) | def vector_to_cross_product_matrix(v):
function compute_epipolar_errors (line 17) | def compute_epipolar_errors(qvec_r2t, tvec_r2t, p2d_r, p2d_t):
function pose_matrix_from_qvec_tvec (line 32) | def pose_matrix_from_qvec_tvec(qvec, tvec):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/utils/io.py
function read_image (line 10) | def read_image(path, grayscale=False):
function list_h5_names (line 23) | def list_h5_names(path):
function get_keypoints (line 33) | def get_keypoints(path: Path, name: str,
function find_pair (line 44) | def find_pair(hfile: h5py.File, name0: str, name1: str):
function get_matches (line 63) | def get_matches(path: Path, name0: str, name1: str) -> Tuple[np.ndarray]:
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/utils/parsers.py
function parse_image_list (line 10) | def parse_image_list(path, with_intrinsics=False):
function parse_image_lists (line 31) | def parse_image_lists(paths, with_intrinsics=False):
function parse_retrieval (line 40) | def parse_retrieval(path):
function names_to_pair (line 51) | def names_to_pair(name0, name1, separator='/'):
function names_to_pair_old (line 55) | def names_to_pair_old(name0, name1):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/utils/read_write_model.py
class Image (line 52) | class Image(BaseImage):
method qvec2rotmat (line 53) | def qvec2rotmat(self):
function read_next_bytes (line 76) | def read_next_bytes(fid, num_bytes, format_char_sequence, endian_charact...
function write_next_bytes (line 88) | def write_next_bytes(fid, data, format_char_sequence, endian_character="...
function read_cameras_text (line 104) | def read_cameras_text(path):
function read_cameras_binary (line 130) | def read_cameras_binary(path_to_model_file):
function write_cameras_text (line 159) | def write_cameras_text(cameras, path):
function write_cameras_binary (line 176) | def write_cameras_binary(cameras, path_to_model_file):
function read_images_text (line 196) | def read_images_text(path):
function read_images_binary (line 227) | def read_images_binary(path_to_model_file):
function write_images_text (line 262) | def write_images_text(images, path):
function write_images_binary (line 290) | def write_images_binary(images, path_to_model_file):
function read_points3D_text (line 311) | def read_points3D_text(path):
function read_points3D_binary (line 338) | def read_points3D_binary(path_to_model_file):
function write_points3D_text (line 368) | def write_points3D_text(points3D, path):
function write_points3D_binary (line 393) | def write_points3D_binary(points3D, path_to_model_file):
function detect_model_format (line 412) | def detect_model_format(path, ext):
function read_model (line 421) | def read_model(path, ext=""):
function write_model (line 449) | def write_model(cameras, images, points3D, path, ext=".bin"):
function qvec2rotmat (line 461) | def qvec2rotmat(qvec):
function rotmat2qvec (line 474) | def rotmat2qvec(R):
function main (line 488) | def main():
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/utils/viz.py
function cm_RdGn (line 15) | def cm_RdGn(x):
function plot_images (line 22) | def plot_images(imgs, titles=None, cmaps='gray', dpi=100, pad=.5,
function plot_keypoints (line 56) | def plot_keypoints(kpts, colors='lime', ps=4):
function plot_matches (line 70) | def plot_matches(kpts0, kpts1, color=None, lw=1.5, ps=4, indices=(0, 1),...
function add_text (line 112) | def add_text(idx, text, pos=(0.01, 0.99), fs=15, color='w',
function save_plot (line 123) | def save_plot(path, **kw):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/utils/viz_3d.py
function to_homogeneous (line 17) | def to_homogeneous(points):
function init_figure (line 22) | def init_figure(height: int = 800) -> go.Figure:
function plot_points (line 59) | def plot_points(
function plot_camera (line 75) | def plot_camera(
function plot_camera_colmap (line 124) | def plot_camera_colmap(
function plot_cameras (line 140) | def plot_cameras(
function plot_reconstruction (line 150) | def plot_reconstruction(
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/hloc/visualization.py
function visualize_sfm_2d (line 12) | def visualize_sfm_2d(reconstruction, image_dir, color_by='visibility',
function visualize_loc (line 56) | def visualize_loc(results, image_dir, reconstruction=None, db_image_dir=...
function visualize_loc_from_log (line 79) | def visualize_loc_from_log(image_dir, query_name, loc, reconstruction=None,
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/SuperGluePretrainedNetwork/models/matching.py
class Matching (line 49) | class Matching(torch.nn.Module):
method __init__ (line 51) | def __init__(self, config={}):
method forward (line 56) | def forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/SuperGluePretrainedNetwork/models/superglue.py
function MLP (line 51) | def MLP(channels: List[int], do_bn: bool = True) -> nn.Module:
function normalize_keypoints (line 65) | def normalize_keypoints(kpts, image_shape):
class KeypointEncoder (line 75) | class KeypointEncoder(nn.Module):
method __init__ (line 77) | def __init__(self, feature_dim: int, layers: List[int]) -> None:
method forward (line 82) | def forward(self, kpts, scores):
function attention (line 87) | def attention(query: torch.Tensor, key: torch.Tensor, value: torch.Tenso...
class MultiHeadedAttention (line 94) | class MultiHeadedAttention(nn.Module):
method __init__ (line 96) | def __init__(self, num_heads: int, d_model: int):
method forward (line 104) | def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch...
class AttentionalPropagation (line 112) | class AttentionalPropagation(nn.Module):
method __init__ (line 113) | def __init__(self, feature_dim: int, num_heads: int):
method forward (line 119) | def forward(self, x: torch.Tensor, source: torch.Tensor) -> torch.Tensor:
class AttentionalGNN (line 124) | class AttentionalGNN(nn.Module):
method __init__ (line 125) | def __init__(self, feature_dim: int, layer_names: List[str]) -> None:
method forward (line 132) | def forward(self, desc0: torch.Tensor, desc1: torch.Tensor) -> Tuple[t...
function log_sinkhorn_iterations (line 143) | def log_sinkhorn_iterations(Z: torch.Tensor, log_mu: torch.Tensor, log_n...
function log_optimal_transport (line 152) | def log_optimal_transport(scores: torch.Tensor, alpha: torch.Tensor, ite...
function arange_like (line 175) | def arange_like(x, dim: int):
class SuperGlue (line 179) | class SuperGlue(nn.Module):
method __init__ (line 206) | def __init__(self, config):
method forward (line 230) | def forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/SuperGluePretrainedNetwork/models/superpoint.py
function simple_nms (line 47) | def simple_nms(scores, nms_radius: int):
function remove_borders (line 65) | def remove_borders(keypoints, scores, border: int, height: int, width: i...
function top_k_keypoints (line 73) | def top_k_keypoints(keypoints, scores, k: int):
function sample_descriptors (line 80) | def sample_descriptors(keypoints, descriptors, s: int = 8):
class SuperPoint (line 95) | class SuperPoint(nn.Module):
method __init__ (line 111) | def __init__(self, config):
method forward (line 145) | def forward(self, data):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/SuperGluePretrainedNetwork/models/utils.py
class AverageTimer (line 57) | class AverageTimer:
method __init__ (line 60) | def __init__(self, smoothing=0.3, newline=False):
method reset (line 67) | def reset(self):
method update (line 74) | def update(self, name='default'):
method print (line 83) | def print(self, text='Timer'):
class VideoStreamer (line 99) | class VideoStreamer:
method __init__ (line 106) | def __init__(self, basedir, resize, skip, image_glob, max_length=10000...
method load_image (line 159) | def load_image(self, impath):
method next_frame (line 175) | def next_frame(self):
method start_ip_camera_thread (line 213) | def start_ip_camera_thread(self):
method update_ip_camera (line 220) | def update_ip_camera(self):
method cleanup (line 235) | def cleanup(self):
function process_resize (line 240) | def process_resize(w, h, resize):
function frame2tensor (line 259) | def frame2tensor(frame, device):
function read_image (line 263) | def read_image(path, device, resize, rotation, resize_float):
function estimate_pose (line 288) | def estimate_pose(kpts0, kpts1, K0, K1, thresh, conf=0.99999):
function rotate_intrinsics (line 315) | def rotate_intrinsics(K, image_shape, rot):
function rotate_pose_inplane (line 335) | def rotate_pose_inplane(i_T_w, rot):
function scale_intrinsics (line 346) | def scale_intrinsics(K, scales):
function to_homogeneous (line 351) | def to_homogeneous(points):
function compute_epipolar_error (line 355) | def compute_epipolar_error(kpts0, kpts1, T_0to1, K0, K1):
function angle_error_mat (line 377) | def angle_error_mat(R1, R2):
function angle_error_vec (line 383) | def angle_error_vec(v1, v2):
function compute_pose_error (line 388) | def compute_pose_error(T_0to1, R, t):
function pose_auc (line 397) | def pose_auc(errors, thresholds):
function plot_image_pair (line 415) | def plot_image_pair(imgs, dpi=100, size=6, pad=.5):
function plot_keypoints (line 429) | def plot_keypoints(kpts0, kpts1, color='w', ps=2):
function plot_matches (line 435) | def plot_matches(kpts0, kpts1, color, lw=1.5, ps=4):
function make_matching_plot (line 452) | def make_matching_plot(image0, image1, kpts0, kpts1, mkpts0, mkpts1,
function make_matching_plot_fast (line 484) | def make_matching_plot_fast(image0, image1, kpts0, kpts1, mkpts0,
function error_colormap (line 553) | def error_colormap(x):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/d2net/train.py
function process_epoch (line 156) | def process_epoch(
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/datasets/__main__.py
function viz_dataset (line 7) | def viz_dataset(db, nr=6, nc=6):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/datasets/create.py
class DatasetCreator (line 5) | class DatasetCreator:
method __init__ (line 15) | def __init__(self, globs):
method __call__ (line 19) | def __call__(self, dataset_cmd ):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/datasets/dataset.py
class Dataset (line 8) | class Dataset(object):
method __len__ (line 36) | def __len__(self):
method get_filename (line 39) | def get_filename(self, img_idx, root=None):
method get_key (line 42) | def get_key(self, img_idx):
method key_to_index (line 45) | def key_to_index(self, key):
method get_image (line 50) | def get_image(self, img_idx, resize=None):
method get_image_size (line 57) | def get_image_size(self, img_idx):
method get_label (line 60) | def get_label(self, img_idx, toint=False):
method has_label (line 63) | def has_label(self):
method get_query_db (line 67) | def get_query_db(self):
method get_query_groundtruth (line 70) | def get_query_groundtruth(self, query_idx, what='AP'):
method eval_query_AP (line 83) | def eval_query_AP(self, query_idx, scores):
method eval_query_top (line 94) | def eval_query_top(self, query_idx, scores, k=(1,5,10,20,50,100)):
method original (line 103) | def original(self):
method __repr__ (line 106) | def __repr__(self):
function split (line 122) | def split( dataset, train_prop, val_prop=0, method='balanced' ):
class SubDataset (line 201) | class SubDataset(Dataset):
method __init__ (line 204) | def __init__(self, dataset, indices):
method get_key (line 213) | def get_key(self, i):
method get_label (line 216) | def get_label(self, i, **kw):
method get_bbox (line 219) | def get_bbox(self, i, **kw):
method __repr__ (line 225) | def __repr__(self):
method viz_distr (line 231) | def viz_distr(self):
class CatDataset (line 241) | class CatDataset(Dataset):
method __init__ (line 244) | def __init__(self, *datasets):
method which (line 275) | def which(self, i):
method get (line 280) | def get(self, i, attr):
method __getattr__ (line 284) | def __getattr__(self, name):
method call (line 292) | def call(self, i, func, *args, **kwargs):
method get_key (line 296) | def get_key(self, i):
method get_label (line 301) | def get_label(self, i, toint=False):
method get_bbox (line 305) | def get_bbox(self,i):
method get_polygons (line 309) | def get_polygons(self,i,**kw):
function deploy (line 316) | def deploy( dataset, target_dir, transforms=None, redo=False, ext=None, ...
class DeployedDataset (line 404) | class DeployedDataset(Dataset):
method __init__ (line 407) | def __init__(self, dataset, root, imsizes=None, trfs=None, ext=None):
method __repr__ (line 442) | def __repr__(self):
method __len__ (line 447) | def __len__(self):
method get_key (line 450) | def get_key(self, i):
method get_something (line 455) | def get_something(self, what, i, *args, **fmt):
method get_bbox (line 464) | def get_bbox(self, i, **kw):
method get_polygons (line 467) | def get_polygons(self, i, *args, **kw):
method get_label_map (line 470) | def get_label_map(self, i, *args, **kw):
method get_instance_map (line 473) | def get_instance_map(self, i, *args, **kw):
method get_angle_map (line 476) | def get_angle_map(self, i, *args, **kw):
method original (line 480) | def original(self):
function deploy_and_split (line 485) | def deploy_and_split( trainset, deploy_trf=None, deploy_dir='/dev/shm',
class CropDataset (line 514) | class CropDataset(Dataset):
method __init__ (line 517) | def __init__(self, dataset, list_of_imgs_and_crops):
method get_image (line 524) | def get_image(self, img_idx):
method get_filename (line 531) | def get_filename(self, img_idx):
method get_key (line 534) | def get_key(self, img_idx):
method crop_image (line 537) | def crop_image(self, img, polygons):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/datasets/downloader.py
function download_dataset (line 6) | def download_dataset(dataset):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/datasets/generic.py
class ImageList (line 13) | class ImageList(Dataset):
method __init__ (line 18) | def __init__(self, img_list_path, root='', imgs=None):
method get_key (line 29) | def get_key(self, i):
class LabelledDataset (line 33) | class LabelledDataset (Dataset):
method find_classes (line 37) | def find_classes(self, *arg, **cls_idx):
class ImageListLabels (line 44) | class ImageListLabels(LabelledDataset):
method __init__ (line 49) | def __init__(self, img_list_path, root=None):
method get_key (line 67) | def get_key(self, i):
method get_label (line 70) | def get_label(self, i, toint=False):
method get_query_db (line 76) | def get_query_db(self):
class ImageListLabelsQ (line 80) | class ImageListLabelsQ(ImageListLabels):
method __init__ (line 85) | def __init__(self, img_list_path, query_list_path, root=None):
method find_classes (line 98) | def find_classes(self, *arg, **cls_idx):
method get_query_db (line 104) | def get_query_db(self):
class ImagesAndLabels (line 108) | class ImagesAndLabels(ImageListLabels):
method __init__ (line 113) | def __init__(self, imgs, labels, cls_idx, root=None):
class ImageListRelevants (line 124) | class ImageListRelevants(Dataset):
method __init__ (line 130) | def __init__(self, gt_file, root=None, img_dir='jpg', ext='.jpg'):
method get_relevants (line 150) | def get_relevants(self, qimg_idx, mode='classic'):
method get_junk (line 161) | def get_junk(self, qimg_idx, mode='classic'):
method get_query_filename (line 172) | def get_query_filename(self, qimg_idx, root=None):
method get_query_roi (line 175) | def get_query_roi(self, qimg_idx):
method get_key (line 178) | def get_key(self, i):
method get_query_key (line 181) | def get_query_key(self, i):
method get_query_db (line 184) | def get_query_db(self):
method get_query_groundtruth (line 187) | def get_query_groundtruth(self, query_idx, what='AP', mode='classic'):
method eval_query_AP (line 196) | def eval_query_AP(self, query_idx, scores):
class ImageListROIs (line 227) | class ImageListROIs(Dataset):
method __init__ (line 228) | def __init__(self, root, img_dir, imgs, rois):
method get_key (line 238) | def get_key(self, i):
method get_roi (line 241) | def get_roi(self, i):
method get_image (line 244) | def get_image(self, img_idx, resize=None):
function not_none (line 253) | def not_none(label):
class ImageClusters (line 257) | class ImageClusters(LabelledDataset):
method __init__ (line 262) | def __init__(self, json_path, root=None, filter=not_none):
method get_key (line 285) | def get_key(self, i):
method get_label (line 288) | def get_label(self, i, toint=False):
class NullCluster (line 295) | class NullCluster(ImageClusters):
method __init__ (line 298) | def __init__(self, json_path, root=None):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/datasets/generic_func.py
function find_and_list_classes (line 8) | def find_and_list_classes(labels, cls_idx=None ):
function find_relevants (line 46) | def find_relevants(labels):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/datasets/landmarks.py
class Landmarks_clean (line 6) | class Landmarks_clean(ImageListLabels):
method __init__ (line 7) | def __init__(self):
class Landmarks_clean_val (line 11) | class Landmarks_clean_val(ImageListLabels):
method __init__ (line 12) | def __init__(self):
class Landmarks_lite (line 16) | class Landmarks_lite(ImageListLabels):
method __init__ (line 17) | def __init__(self):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/datasets/landmarks18.py
class Landmarks18_train (line 6) | class Landmarks18_train(ImageListLabels):
method __init__ (line 7) | def __init__(self):
class Landmarks18 (line 11) | class Landmarks18(ImageListLabels):
method __init__ (line 12) | def __init__(self):
class Landmarks18_lite (line 16) | class Landmarks18_lite(ImageListLabels):
method __init__ (line 17) | def __init__(self):
class Landmarks18_mid (line 21) | class Landmarks18_mid(ImageListLabels):
method __init__ (line 22) | def __init__(self):
class Landmarks18_5K (line 26) | class Landmarks18_5K(ImageListLabels):
method __init__ (line 27) | def __init__(self):
class Landmarks18_val (line 31) | class Landmarks18_val(ImageListLabels):
method __init__ (line 32) | def __init__(self):
class Landmarks18_valdstr (line 36) | class Landmarks18_valdstr(ImageListLabels):
method __init__ (line 37) | def __init__(self):
class Landmarks18_index (line 41) | class Landmarks18_index(ImageList):
method __init__ (line 42) | def __init__(self):
class Landmarks18_new_index (line 46) | class Landmarks18_new_index(ImageList):
method __init__ (line 47) | def __init__(self):
class Landmarks18_test (line 51) | class Landmarks18_test(ImageList):
method __init__ (line 52) | def __init__(self):
class Landmarks18_pca (line 56) | class Landmarks18_pca(ImageList):
method __init__ (line 57) | def __init__(self):
class Landmarks18_missing_index (line 61) | class Landmarks18_missing_index(ImageList):
method __init__ (line 62) | def __init__(self):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/datasets/oxford.py
class Oxford5K (line 6) | class Oxford5K(ImageListRelevants):
method __init__ (line 7) | def __init__(self):
class ROxford5K (line 11) | class ROxford5K(ImageListRelevants):
method __init__ (line 12) | def __init__(self):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/datasets/paris.py
class Paris6K (line 6) | class Paris6K(ImageListRelevants):
method __init__ (line 7) | def __init__(self):
class RParis6K (line 11) | class RParis6K(ImageListRelevants):
method __init__ (line 12) | def __init__(self):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/extract_features.py
function extract_features (line 26) | def extract_features(db, net, trfs, pooling='mean', gemp=3, detailed=Fal...
function load_model (line 71) | def load_model(path, iscuda):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/extract_kapture.py
function extract_kapture_global_features (line 20) | def extract_kapture_global_features(kapture_root_path: str, net, trfs, p...
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/loss.py
class APLoss (line 7) | class APLoss (nn.Module):
method __init__ (line 20) | def __init__(self, nq=25, min=0, max=1):
method forward (line 43) | def forward(self, x, label, qw=None, ret='1-mAP'):
method measures (line 67) | def measures(self, x, gt, loss=None):
class TAPLoss (line 73) | class TAPLoss (APLoss):
method __init__ (line 86) | def __init__(self, nq=25, min=0, max=1, simplified=False):
method forward (line 90) | def forward(self, x, label, qw=None, ret='1-mAP'):
method measures (line 133) | def measures(self, x, gt, loss=None):
class TripletMarginLoss (line 139) | class TripletMarginLoss(nn.TripletMarginLoss):
method eval_func (line 144) | def eval_func(self, dp, dn):
class TripletLogExpLoss (line 148) | class TripletLogExpLoss(nn.Module):
method __init__ (line 183) | def __init__(self, p=2, eps=1e-6, swap=False):
method forward (line 189) | def forward(self, anchor, positive, negative):
method eval_func (line 206) | def eval_func(self, dp, dn):
function sim_to_dist (line 210) | def sim_to_dist(scores):
class APLoss_dist (line 214) | class APLoss_dist (APLoss):
method forward (line 215) | def forward(self, x, label, **kw):
class TAPLoss_dist (line 220) | class TAPLoss_dist (TAPLoss):
method forward (line 221) | def forward(self, x, label, **kw):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/nets/__init__.py
function list_archs (line 18) | def list_archs():
function create_model (line 26) | def create_model(arch, pretrained='', delete_fc=False, *args, **kwargs):
function load_pretrained_weights (line 69) | def load_pretrained_weights(net, state_dict, delete_fc=False):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/nets/backbones/__init__.py
function load_pretrained_weights (line 4) | def load_pretrained_weights(net, state_dict):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/nets/backbones/resnet.py
function conv3x3 (line 9) | def conv3x3(in_planes, out_planes, stride=1):
class BasicBlock (line 15) | class BasicBlock(nn.Module):
method __init__ (line 18) | def __init__(self, inplanes, planes, stride=1, downsample=None):
method forward (line 28) | def forward(self, x):
class Bottleneck (line 46) | class Bottleneck(nn.Module):
method __init__ (line 54) | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=...
method forward (line 67) | def forward(self, x):
function reset_weights (line 92) | def reset_weights(net):
class ResNet (line 102) | class ResNet(nn.Module):
method __init__ (line 105) | def __init__(self, block, layers, fc_out, model_name, self_similarity_...
method _make_layer (line 134) | def _make_layer(self, block, planes, blocks, stride=1, self_similarity...
method forward (line 157) | def forward(self, x, out_layer=0):
method load_pretrained_weights (line 176) | def load_pretrained_weights(self, pretrain_code):
function resnet18 (line 205) | def resnet18(out_dim=2048):
function resnet50 (line 211) | def resnet50(out_dim=2048):
function resnet101 (line 217) | def resnet101(out_dim=2048):
function resnet152 (line 223) | def resnet152(out_dim=2048):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/nets/backbones/resnext101_features.py
class LambdaBase (line 7) | class LambdaBase(nn.Sequential):
method __init__ (line 8) | def __init__(self, fn, *args):
method forward_prepare (line 12) | def forward_prepare(self, input):
class Lambda (line 18) | class Lambda(LambdaBase):
method forward (line 19) | def forward(self, input):
class LambdaMap (line 22) | class LambdaMap(LambdaBase):
method forward (line 23) | def forward(self, input):
class LambdaReduce (line 26) | class LambdaReduce(LambdaBase):
method forward (line 27) | def forward(self, input):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/nets/layers/pooling.py
class GeneralizedMeanPooling (line 12) | class GeneralizedMeanPooling(Module):
method __init__ (line 31) | def __init__(self, norm, output_size=1, eps=1e-6):
method forward (line 38) | def forward(self, x):
method __repr__ (line 42) | def __repr__(self):
class GeneralizedMeanPoolingP (line 49) | class GeneralizedMeanPoolingP(GeneralizedMeanPooling):
method __init__ (line 52) | def __init__(self, norm=3, output_size=1, eps=1e-6):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/nets/rmac_resnet.py
function l2_normalize (line 7) | def l2_normalize(x, axis=-1):
class ResNet_RMAC (line 12) | class ResNet_RMAC(ResNet):
method __init__ (line 15) | def __init__(self, block, layers, model_name, out_dim=2048, norm_featu...
method forward (line 39) | def forward(self, x):
function resnet18_rmac (line 74) | def resnet18_rmac(backbone=ResNet_RMAC, **kwargs):
function resnet50_rmac (line 78) | def resnet50_rmac(backbone=ResNet_RMAC, **kwargs):
function resnet101_rmac (line 82) | def resnet101_rmac(backbone=ResNet_RMAC, **kwargs):
function resnet152_rmac (line 86) | def resnet152_rmac(backbone=ResNet_RMAC, **kwargs):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/nets/rmac_resnet_fpn.py
function l2_normalize (line 6) | def l2_normalize(x, axis=-1):
class ResNet_RMAC_FPN (line 11) | class ResNet_RMAC_FPN(ResNet):
method __init__ (line 14) | def __init__(self, block, layers, model_name, out_dim=None, norm_featu...
method forward (line 52) | def forward(self, x):
function resnet18_fpn_rmac (line 95) | def resnet18_fpn_rmac(backbone=ResNet_RMAC_FPN, **kwargs):
function resnet50_fpn_rmac (line 99) | def resnet50_fpn_rmac(backbone=ResNet_RMAC_FPN, **kwargs):
function resnet101_fpn_rmac (line 103) | def resnet101_fpn_rmac(backbone=ResNet_RMAC_FPN, **kwargs):
function resnet101_fpn0_rmac (line 107) | def resnet101_fpn0_rmac(backbone=ResNet_RMAC_FPN, **kwargs):
function resnet152_fpn_rmac (line 111) | def resnet152_fpn_rmac(backbone=ResNet_RMAC_FPN, **kwargs):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/nets/rmac_resnext.py
function l2_normalize (line 5) | def l2_normalize(x, axis=-1):
class ResNext_RMAC (line 10) | class ResNext_RMAC(nn.Module):
method __init__ (line 13) | def __init__(self, backbone, out_dim=2048, norm_features=False,
method forward (line 36) | def forward(self, x):
function resnet18_rmac (line 69) | def resnet18_rmac(backbone=ResNet_RMAC, **kwargs):
function resnet50_rmac (line 73) | def resnet50_rmac(backbone=ResNet_RMAC, **kwargs):
function resnet101_rmac (line 77) | def resnet101_rmac(backbone=ResNet_RMAC, **kwargs):
function resnet152_rmac (line 81) | def resnet152_rmac(backbone=ResNet_RMAC, **kwargs):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/test_dir.py
function expand_descriptors (line 24) | def expand_descriptors(descs, db=None, alpha=0, k=0):
function extract_image_features (line 47) | def extract_image_features(dataset, transforms, net, ret_imgs=False, sam...
function eval_model (line 97) | def eval_model(db, net, trfs, pooling='mean', gemp=3, detailed=False, wh...
function load_model (line 183) | def load_model(path, iscuda):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/utils/common.py
function typename (line 19) | def typename(x):
function tonumpy (line 23) | def tonumpy(x):
function matmul (line 30) | def matmul(A, B):
function pool (line 41) | def pool(x, pooling='mean', gemp=3):
function torch_set_gpu (line 58) | def torch_set_gpu(gpus, seed=None, randomize=True):
function torch_set_seed (line 84) | def torch_set_seed(seed, cuda, randomize=True):
function save_checkpoint (line 102) | def save_checkpoint(state, is_best, filename):
function load_checkpoint (line 117) | def load_checkpoint(filename, iscuda=False):
function switch_model_to_cuda (line 150) | def switch_model_to_cuda(model, iscuda=True, checkpoint=None):
function model_size (line 178) | def model_size(model):
function freeze_batch_norm (line 187) | def freeze_batch_norm(model, freeze=True, only_running=False):
function variables (line 205) | def variables(inputs, iscuda, not_on_gpu=[]):
function transform (line 221) | def transform(pca, X, whitenp=0.5, whitenv=None, whitenm=1.0, use_sklear...
function whiten_features (line 235) | def whiten_features(X, pca, l2norm=True, whitenp=0.5, whitenv=None, whit...
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/utils/convenient.py
function mkdir (line 6) | def mkdir(d):
function mkdir (line 11) | def mkdir( fname, isfile='auto' ):
function touch (line 26) | def touch(filename):
function assert_outpath (line 35) | def assert_outpath( path, ext='', mkdir=False ):
class _BasePool (line 51) | class _BasePool (object):
method __init__ (line 52) | def __init__(self, nt=0):
method starmap (line 54) | def starmap(self, func, args):
class ProcessPool (line 57) | class ProcessPool (_BasePool):
method __init__ (line 58) | def __init__(self, nt=0):
class ThreadPool (line 62) | class ThreadPool (_BasePool):
method __init__ (line 63) | def __init__(self, nt=0):
function is_iterable (line 71) | def is_iterable(val, exclude={str}):
function listify (line 83) | def listify( val, exclude={str} ):
function unlistify (line 88) | def unlistify( lis ):
function sig_folder_ext (line 98) | def sig_folder_ext(f):
function sig_folder (line 100) | def sig_folder(f):
function sig_ext (line 102) | def sig_ext(f):
function sig_3folder_ext (line 104) | def sig_3folder_ext(f):
function sig_all (line 108) | def sig_all(f):
function saferm (line 111) | def saferm(f, sig=sig_folder_ext ):
function tic (line 139) | def tic(tag='tic'):
function toc (line 144) | def toc(tag='', cum=False):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/utils/evaluation.py
function accuracy_topk (line 8) | def accuracy_topk(output, target, topk=(1,)):
function compute_AP (line 41) | def compute_AP(label, score):
function compute_average_precision (line 46) | def compute_average_precision(positive_ranks):
function compute_average_precision_quantized (line 85) | def compute_average_precision_quantized(labels, idx, step=0.01):
function pixelwise_iou (line 101) | def pixelwise_iou(output, target):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/utils/funcs.py
function sigmoid (line 7) | def sigmoid(x, a=1, b=0):
function sigmoid_range (line 11) | def sigmoid_range(x, at5, at95):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/utils/pytorch_loader.py
function get_loader (line 11) | def get_loader( dataset, trf_chain, iscuda,
class PytorchLoader (line 78) | class PytorchLoader (data.Dataset):
method __init__ (line 96) | def __init__(self, dataset, transform=None,
method __getitem__ (line 106) | def __getitem__(self, index):
method __len__ (line 164) | def __len__(self):
method __repr__ (line 167) | def __repr__(self):
class BalancedSampler (line 184) | class BalancedSampler (data.sampler.Sampler):
method __init__ (line 202) | def __init__(self, dataset, size=1.0, balanced=1.0, use_all=False):
method __iter__ (line 220) | def __iter__(self):
method __len__ (line 248) | def __len__(self):
function load_one_img (line 256) | def load_one_img( loader ):
function tensor2img (line 270) | def tensor2img(tensor, model):
function test_loader_speed (line 287) | def test_loader_speed(loader_):
function try_to_get (line 298) | def try_to_get(func, *args, **kwargs):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/utils/transforms.py
function create (line 11) | def create(cmd_line, to_tensor=False, **vars):
class Identity (line 40) | class Identity (object):
method __call__ (line 43) | def __call__(self, inp):
class Pad (line 47) | class Pad(object):
method __init__ (line 53) | def __init__(self, size, color=(127,127,127)):
method __call__ (line 60) | def __call__(self, inp):
class PadSquare (line 77) | class PadSquare (object):
method __init__ (line 87) | def __init__(self, size=None, color=(127,127,127)):
method __call__ (line 94) | def __call__(self, inp):
class RandomBorder (line 108) | class RandomBorder (object):
method __init__ (line 112) | def __init__(self, min_size, max_size, color=(127,127,127)):
method __call__ (line 122) | def __call__(self, inp):
class Scale (line 133) | class Scale (object):
method __init__ (line 144) | def __init__(self, size, interpolation=Image.BILINEAR, largest=False, ...
method get_params (line 154) | def get_params(self, imsize):
method __call__ (line 174) | def __call__(self, inp):
class RandomScale (line 189) | class RandomScale (Scale):
method __init__ (line 203) | def __init__(self, min_size, max_size, ar=1, can_upscale=False, can_do...
method get_params (line 214) | def get_params(self, imsize):
class RandomCrop (line 249) | class RandomCrop (object):
method __init__ (line 262) | def __init__(self, size, padding=0):
method get_params (line 270) | def get_params(img, output_size):
method __call__ (line 279) | def __call__(self, inp):
class CenterCrop (line 301) | class CenterCrop (RandomCrop):
method get_params (line 310) | def get_params(img, output_size):
class CropToBbox (line 319) | class CropToBbox(object):
method __init__ (line 328) | def __init__(self, margin=0.5, min_size=0):
method __call__ (line 332) | def __call__(self, inp):
class RandomRotation (line 361) | class RandomRotation(object):
method __init__ (line 372) | def __init__(self, degrees, interpolation=Image.BILINEAR):
method __call__ (line 376) | def __call__(self, inp):
class RandomFlip (line 391) | class RandomFlip (object):
method __call__ (line 394) | def __call__(self, inp):
class RandomTilting (line 406) | class RandomTilting(object):
method __init__ (line 417) | def __init__(self, magnitude, directions='all'):
method __call__ (line 421) | def __call__(self, inp):
class StillTransform (line 491) | class StillTransform (object):
method _transform (line 494) | def _transform(self, img):
method __call__ (line 497) | def __call__(self, inp):
class ColorJitter (line 507) | class ColorJitter (StillTransform):
method __init__ (line 519) | def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
method get_params (line 526) | def get_params(brightness, contrast, saturation, hue):
method _transform (line 555) | def _transform(self, img):
class RandomErasing (line 560) | class RandomErasing (StillTransform):
method __init__ (line 576) | def __init__(self, area):
method _transform (line 579) | def _transform(self, image):
class ToTensor (line 617) | class ToTensor (StillTransform, tvf.ToTensor):
method _transform (line 618) | def _transform(self, img):
class Normalize (line 621) | class Normalize (StillTransform, tvf.Normalize):
method _transform (line 622) | def _transform(self, img):
class BBoxToPixelLabel (line 626) | class BBoxToPixelLabel (object):
method __init__ (line 629) | def __init__(self, nclass, downsize, mode):
method __call__ (line 637) | def __call__(self, inp):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/deep-image-retrieval/dirtorch/utils/transforms_tools.py
function is_pil_image (line 6) | def is_pil_image(img):
class DummyImg (line 9) | class DummyImg:
method __init__ (line 12) | def __init__(self, size):
method resize (line 15) | def resize(self, size, *args, **kwargs):
method expand (line 18) | def expand(self, border):
method crop (line 27) | def crop(self, border):
method rotate (line 35) | def rotate(self, angle):
method transform (line 38) | def transform(self, size, *args, **kwargs):
function grab_img (line 42) | def grab_img( img_and_label ):
function update_img_and_labels (line 59) | def update_img_and_labels(img_and_label, img, aff=None, persp=None):
function rand_log_uniform (line 100) | def rand_log_uniform(a, b):
function int_tuple (line 104) | def int_tuple(*args):
function aff_translate (line 107) | def aff_translate(tx, ty):
function aff_rotate (line 111) | def aff_rotate(angle):
function aff_mul (line 115) | def aff_mul(aff, aff2):
function persp_mul (line 134) | def persp_mul(mat, mat2):
function adjust_brightness (line 157) | def adjust_brightness(img, brightness_factor):
function adjust_contrast (line 175) | def adjust_contrast(img, contrast_factor):
function adjust_saturation (line 193) | def adjust_saturation(img, saturation_factor):
function adjust_hue (line 211) | def adjust_hue(img, hue_factor):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/disk/colmap/colmap/database.py
function image_ids_to_pair_id (line 113) | def image_ids_to_pair_id(image_id1, image_id2):
function pair_id_to_image_ids (line 119) | def pair_id_to_image_ids(pair_id):
function array_to_blob (line 125) | def array_to_blob(array):
function blob_to_array (line 132) | def blob_to_array(blob, dtype, shape=(-1,)):
class COLMAPDatabase (line 139) | class COLMAPDatabase(sqlite3.Connection):
method connect (line 142) | def connect(database_path):
method __init__ (line 146) | def __init__(self, *args, **kwargs):
method add_camera (line 164) | def add_camera(self, model, width, height, params,
method add_image (line 173) | def add_image(self, name, camera_id,
method add_keypoints (line 181) | def add_keypoints(self, image_id, keypoints):
method add_descriptors (line 190) | def add_descriptors(self, image_id, descriptors):
method add_matches (line 196) | def add_matches(self, image_id1, image_id2, matches):
method add_two_view_geometry (line 209) | def add_two_view_geometry(self, image_id1, image_id2, matches,
function example_usage (line 228) | def example_usage():
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/disk/colmap/colmap/read_dense.py
function read_array (line 39) | def read_array(path):
function parse_args (line 57) | def parse_args():
function main (line 73) | def main():
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/disk/colmap/colmap/read_model.py
class Image (line 48) | class Image(BaseImage):
method qvec2rotmat (line 49) | def qvec2rotmat(self):
function read_next_bytes (line 70) | def read_next_bytes(fid, num_bytes, format_char_sequence, endian_charact...
function read_cameras_text (line 81) | def read_cameras_text(path):
function read_cameras_binary (line 107) | def read_cameras_binary(path_to_model_file):
function read_images_text (line 136) | def read_images_text(path):
function read_images_binary (line 167) | def read_images_binary(path_to_model_file):
function read_points3D_text (line 203) | def read_points3D_text(path):
function read_points3d_binary (line 230) | def read_points3d_binary(path_to_model_file):
function read_model (line 260) | def read_model(path, ext):
function qvec2rotmat (line 272) | def qvec2rotmat(qvec):
function rotmat2qvec (line 285) | def rotmat2qvec(R):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/disk/colmap/colmap2dataset.py
function convert_depth (line 8) | def convert_depth(name, src_path, dst_path):
function camera_to_K (line 22) | def camera_to_K(camera):
function create_calibration (line 37) | def create_calibration(image, camera, prefix):
function covisible_pairs (line 48) | def covisible_pairs(images, low=0.5, high=0.8):
function encode_pairs (line 78) | def encode_pairs(pairs):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/disk/colmap/h5_to_db.py
function get_focal (line 8) | def get_focal(image_path, err_on_default=False):
function create_camera (line 36) | def create_camera(db, image_path):
function add_keypoints (line 58) | def add_keypoints(db, h5_path, image_path):
function add_matches (line 80) | def add_matches(db, h5_path, fname_to_id):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/disk/compute_validation_auc.py
function read_deltas (line 8) | def read_deltas(path):
function calculate_auc (line 26) | def calculate_auc(Δ_θ, Δ_T, length):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/disk/detect.py
class Image (line 11) | class Image:
method __init__ (line 12) | def __init__(self, bitmap: ['C', 'H', 'W'], fname: str, orig_shape=None):
method resize_to (line 20) | def resize_to(self, shape):
method to_image_coord (line 28) | def to_image_coord(self, xys: [2, 'N']) -> ([2, 'N'], ['N']):
method _compute_interpolation_size (line 39) | def _compute_interpolation_size(self, shape):
method _interpolate (line 53) | def _interpolate(self, image: ['C', 'H', 'W'], shape) -> ['C', 'h', 'w']:
method _pad (line 63) | def _pad(self, image: ['C', 'H', 'W'], shape) -> ['C', 'h', 'w']:
class SceneDataset (line 73) | class SceneDataset:
method __init__ (line 74) | def __init__(self, image_path, crop_size=(None, None)):
method __len__ (line 80) | def __len__(self):
method __getitem__ (line 83) | def __getitem__(self, ix):
method collate_fn (line 103) | def collate_fn(images):
function extract (line 108) | def extract(dataset, save_path):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/disk/disk/common/errors.py
class DataError (line 1) | class DataError(Exception):
class EstimationFailedError (line 4) | class EstimationFailedError(Exception):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/disk/disk/common/image.py
function _rescale (line 8) | def _rescale(tensor: ['C', 'H', 'W'], size) -> ['C', 'h', 'w']:
function _pad (line 17) | def _pad(tensor: ['C', 'H', 'W'], size, value=0.):
class Image (line 32) | class Image:
method __init__ (line 34) | def __init__(
method K_inv (line 54) | def K_inv(self):
method hwc (line 58) | def hwc(self):
method shape (line 62) | def shape(self):
method scale (line 65) | def scale(self, size):
method pad (line 95) | def pad(self, size):
method to (line 104) | def to(self, *args, **kwargs):
method unproject (line 119) | def unproject(self, xy: [2, 'N']) -> [3, 'N']:
method project (line 133) | def project(self, xyw: [3, 'N']) -> [2, 'N']:
method in_range_mask (line 139) | def in_range_mask(self, xy: [2, 'N']) -> ['N']:
method fetch_depth (line 146) | def fetch_depth(self, xy: [2, 'N']) -> ['N']:
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/disk/disk/common/logger.py
class Logger (line 3) | class Logger:
method __init__ (line 4) | def __init__(self, path):
method add_scalar (line 8) | def add_scalar(self, tag, value):
method add_scalars (line 17) | def add_scalars(self, tag_to_value, prefix=''):
FILE: AutoReconForDens3R/third_party/AutoDecomp/third_party/Hierarchical-Localization/third_party/disk/disk/common/structs.py
class NpArray (
Copy disabled (too large)
Download .json
Condensed preview — 775 files, each showing path, character count, and a content snippet. Download the .json file for the full structured content (36,921K chars).
[
{
"path": "AutoReconForDens3R/.gitattributes",
"chars": 30,
"preview": "*.ipynb linguist-documentation"
},
{
"path": "AutoReconForDens3R/.github/ISSUE_TEMPLATE/bug-report.md",
"chars": 535,
"preview": "---\nname: Bug report\nabout: Create a report to help us improve\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n**Describe the b"
},
{
"path": "AutoReconForDens3R/.github/ISSUE_TEMPLATE/feature_request.md",
"chars": 595,
"preview": "---\nname: Feature request\nabout: Suggest an idea for this project\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n**Is your fea"
},
{
"path": "AutoReconForDens3R/.github/workflows/core_code_checks.yml",
"chars": 1163,
"preview": "name: Core Tests.\n\non:\n push:\n branches: [main]\n pull_request:\n branches: [main]\n\npermissions:\n contents: read\n"
},
{
"path": "AutoReconForDens3R/.github/workflows/publish.yml",
"chars": 951,
"preview": "# This workflows will upload a Python Package using twine when a release is created\n# For more information see: https://"
},
{
"path": "AutoReconForDens3R/.github/workflows/viewer_build_deploy.yml",
"chars": 2835,
"preview": "name: Viewer Build and Deploy.\n\non:\n push:\n branches: [main]\n pull_request:\n branches: [main]\n\njobs:\n build:\n "
},
{
"path": "AutoReconForDens3R/.gitignore",
"chars": 3537,
"preview": "*_debug_*\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distributio"
},
{
"path": "AutoReconForDens3R/.prettierrc.js",
"chars": 126,
"preview": "module.exports = {\n trailingComma: 'all',\n arrowParens: 'always',\n singleQuote: true,\n jsxSingleQuote: false"
},
{
"path": "AutoReconForDens3R/LICENSE",
"chars": 11357,
"preview": " Apache License\n Version 2.0, January 2004\n "
},
{
"path": "AutoReconForDens3R/README.md",
"chars": 4112,
"preview": "# AutoRecon: Automated 3D Object Discovery and Reconstruction\n\n### [Project Page](https://zju3dv.github.io/autorecon) | "
},
{
"path": "AutoReconForDens3R/colab/demo.ipynb",
"chars": 19227,
"preview": "{\n \"cells\": [\n {\n \"cell_type\": \"markdown\",\n \"metadata\": {\n \"colab_type\": \"text\",\n \"id\": \"vie"
},
{
"path": "AutoReconForDens3R/docs/INSTALL.md",
"chars": 1069,
"preview": "# Installation\n\n## Create a conda environment\n```bash\nconda create --name auto_recon -y python=3.9\nconda activate auto_r"
},
{
"path": "AutoReconForDens3R/exps/code-release/bmvs/scan1.sh",
"chars": 2351,
"preview": "\nscan_name=\"scan1\"\n\nDATA_ROOT=data\nINST_REL_DIR=BlendedMVS/$scan_name\nFORCE_RERUN=True\n\n# Coarse decomposition\npython th"
},
{
"path": "AutoReconForDens3R/exps/code-release/bmvs/scan2.sh",
"chars": 2352,
"preview": "\nscan_name=\"scan2\"\n\nDATA_ROOT=data\nINST_REL_DIR=BlendedMVS/$scan_name\nFORCE_RERUN=True\n\n# Coarse decomposition\npython th"
},
{
"path": "AutoReconForDens3R/exps/code-release/bmvs/scan3.sh",
"chars": 2352,
"preview": "\nscan_name=\"scan3\"\n\nDATA_ROOT=data\nINST_REL_DIR=BlendedMVS/$scan_name\nFORCE_RERUN=True\n\n# Coarse decomposition\npython th"
},
{
"path": "AutoReconForDens3R/exps/code-release/bmvs/scan4.sh",
"chars": 2352,
"preview": "\nscan_name=\"scan4\"\n\nDATA_ROOT=data\nINST_REL_DIR=BlendedMVS/$scan_name\nFORCE_RERUN=True\n\n# Coarse decomposition\npython th"
},
{
"path": "AutoReconForDens3R/exps/code-release/bmvs/scan5.sh",
"chars": 2352,
"preview": "\nscan_name=\"scan5\"\n\nDATA_ROOT=data\nINST_REL_DIR=BlendedMVS/$scan_name\nFORCE_RERUN=True\n\n# Coarse decomposition\npython th"
},
{
"path": "AutoReconForDens3R/exps/code-release/bmvs/scan6.sh",
"chars": 2352,
"preview": "\nscan_name=\"scan6\"\n\nDATA_ROOT=data\nINST_REL_DIR=BlendedMVS/$scan_name\nFORCE_RERUN=True\n\n# Coarse decomposition\npython th"
},
{
"path": "AutoReconForDens3R/exps/code-release/co3d_demo/scan1.sh",
"chars": 2444,
"preview": "\nscan_name=scan1\n\nDATA_ROOT=data\nINST_REL_DIR=CO3D_DEMO/$scan_name\nFORCE_RERUN=True\n\npython third_party/AutoDecomp/auto_"
},
{
"path": "AutoReconForDens3R/exps/code-release/co3d_demo/scan2.sh",
"chars": 2444,
"preview": "\nscan_name=scan2\n\nDATA_ROOT=data\nINST_REL_DIR=CO3D_DEMO/$scan_name\nFORCE_RERUN=True\n\npython third_party/AutoDecomp/auto_"
},
{
"path": "AutoReconForDens3R/exps/code-release/co3d_demo/scan3.sh",
"chars": 2444,
"preview": "\nscan_name=scan3\n\nDATA_ROOT=data\nINST_REL_DIR=CO3D_DEMO/$scan_name\nFORCE_RERUN=True\n\npython third_party/AutoDecomp/auto_"
},
{
"path": "AutoReconForDens3R/exps/code-release/co3d_demo/scan4.sh",
"chars": 2444,
"preview": "\nscan_name=scan4\n\nDATA_ROOT=data\nINST_REL_DIR=CO3D_DEMO/$scan_name\nFORCE_RERUN=True\n\npython third_party/AutoDecomp/auto_"
},
{
"path": "AutoReconForDens3R/exps/code-release/co3d_demo/scan5.sh",
"chars": 2444,
"preview": "\nscan_name=scan5\n\nDATA_ROOT=data\nINST_REL_DIR=CO3D_DEMO/$scan_name\nFORCE_RERUN=True\n\npython third_party/AutoDecomp/auto_"
},
{
"path": "AutoReconForDens3R/exps/code-release/run_dens3r_recon.sh",
"chars": 2991,
"preview": "\n#/bin/bash\n\nDATA_ROOT=/path/to/your/data\nINST_REL_DIR=rel_path/to/your/data\nDENS3R_MODEL_PATH=/path/to/pth/file/of/mode"
},
{
"path": "AutoReconForDens3R/exps/code-release/run_pipeline_demo_low-res.sh",
"chars": 2515,
"preview": "\nDATA_ROOT=data\nINST_REL_DIR=custom_data_example/co3d_chair\nFORCE_RERUN=True\n\n# Coarse decomposition\npython third_party/"
},
{
"path": "AutoReconForDens3R/nerfstudio/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/cameras/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/cameras/camera_optimizers.py",
"chars": 4835,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/cameras/camera_paths.py",
"chars": 5550,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/cameras/camera_utils.py",
"chars": 15376,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/cameras/cameras.py",
"chars": 39682,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/cameras/lie_groups.py",
"chars": 4770,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/cameras/rays.py",
"chars": 14218,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/autorecon/__init__.py",
"chars": 4751,
"preview": "from __future__ import annotations\n\nfrom typing import Dict, Sequence\nfrom functools import partial\n\nimport tyro\nfrom sk"
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/autorecon/baseline.py",
"chars": 4159,
"preview": "from . import *\n\n# TODO: neus w/o hash grid + NeRF++ scene parameterization\n\n# neus (MLP-based) w/ a BG Model (l2 scene "
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/autorecon/common.py",
"chars": 19025,
"preview": "from . import *\n\n\n# assume the fg object is bounded by a unit cube, and the entire scene is\n# contracted to [-2, 2] as d"
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/autorecon/distilled_neusfacto.py",
"chars": 6875,
"preview": "from . import *\n\n\n# 2-stages training of neus-facto:\n# 1. train nerfacto; (nerfacto is very robust to the scene conditio"
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/autorecon/feature_field.py",
"chars": 6784,
"preview": "from . import *\n\n\nneus_facto_wbg_fast_dff = Config(\n method_name=\"neus-facto-wbg-fast_dff\",\n trainer=TrainerConfig"
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/autorecon/neusfacto_fast.py",
"chars": 3428,
"preview": "from . import *\n\nneus_facto_wbg_fast = Config(\n method_name=\"neus-facto-wbg-fast\",\n trainer=TrainerConfig(\n "
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/autorecon/regularization.py",
"chars": 11654,
"preview": "from . import *\n\n# nesu-facto w/ explicit pointcloud regularization + separate plane nerf field modeling\nneus_facto_wbg_"
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/autorecon/semantic_nerf.py",
"chars": 1088,
"preview": "from . import *\n\n\nautorecon_semantic_nerf = Config(\n method_name=\"autorecon_semantic-nerfw\",\n trainer=TrainerConfi"
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/base_config.py",
"chars": 11385,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/config_utils.py",
"chars": 1547,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/configs/method_configs.py",
"chars": 32074,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/engine/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/engine/callbacks.py",
"chars": 3976,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/engine/optimizers.py",
"chars": 7436,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/engine/schedulers.py",
"chars": 7572,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/engine/trainer.py",
"chars": 16608,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/exporter/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/exporter/exporter_utils.py",
"chars": 10734,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/exporter/mesh_culling_utils.py",
"chars": 6883,
"preview": "\nfrom __future__ import annotations\n\nfrom pathlib import Path\nfrom typing import Optional, Tuple, Type\nfrom typing_exten"
},
{
"path": "AutoReconForDens3R/nerfstudio/exporter/texture_utils.py",
"chars": 20770,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/exporter/tsdf_utils.py",
"chars": 13712,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/field_components/__init__.py",
"chars": 751,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/field_components/activations.py",
"chars": 1441,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/field_components/base_field_component.py",
"chars": 2089,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/field_components/embedding.py",
"chars": 1657,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/field_components/encodings.py",
"chars": 28268,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/field_components/field_heads.py",
"chars": 6984,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/field_components/mlp.py",
"chars": 3645,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/field_components/progressive_encoding.py",
"chars": 2443,
"preview": "\nfrom rich.console import Console\n\nimport torch\nimport tinycudann as tcnn\nfrom torch import nn\n\nCONSOLE = Console(width="
},
{
"path": "AutoReconForDens3R/nerfstudio/field_components/spatial_distortions.py",
"chars": 7139,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/field_components/temporal_distortions.py",
"chars": 3339,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/fields/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/fields/base_field.py",
"chars": 4642,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/fields/density_fields.py",
"chars": 5137,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/fields/feature_field.py",
"chars": 14489,
"preview": "from dataclasses import dataclass, field\nfrom typing import Dict, Optional, Tuple, Type\n\nfrom rich.console import Consol"
},
{
"path": "AutoReconForDens3R/nerfstudio/fields/instant_ngp_field.py",
"chars": 7998,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/fields/nerfacto_field.py",
"chars": 18609,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/fields/nerfw_field.py",
"chars": 6831,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/fields/sdf_field.py",
"chars": 30075,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/fields/semantic_nerf_field.py",
"chars": 4264,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/fields/tensorf_field.py",
"chars": 5797,
"preview": "# Copyright 2022 The Plenoptix Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Licen"
},
{
"path": "AutoReconForDens3R/nerfstudio/fields/vanilla_nerf_field.py",
"chars": 5575,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/model_components/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/model_components/losses.py",
"chars": 22522,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/model_components/patch_warping.py",
"chars": 9564,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/model_components/ray_generators.py",
"chars": 2158,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/model_components/ray_samplers.py",
"chars": 76453,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/model_components/renderers.py",
"chars": 11804,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/model_components/scene_colliders.py",
"chars": 9708,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/base_model.py",
"chars": 8476,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/base_surface_model.py",
"chars": 49685,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/distilled_neus_facto.py",
"chars": 10140,
"preview": "\"\"\"\nTwo-stages training of NeuSFacto: 1. use NeRF in both fg and bg; 2. replace NeRF with NeuSFacto in fg.\n\"\"\"\nfrom __fu"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/dto.py",
"chars": 23420,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/instant_ngp.py",
"chars": 9954,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/mipnerf.py",
"chars": 7751,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/monosdf.py",
"chars": 1322,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/nerfacto.py",
"chars": 15228,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/neuralreconW.py",
"chars": 2550,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/neus.py",
"chars": 4636,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/neus_acc.py",
"chars": 5019,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/neus_facto.py",
"chars": 12839,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/neus_facto_dff.py",
"chars": 22426,
"preview": "\"\"\"\nNeuSFacto + DistilledFeatureField\n\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass, field\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/neus_facto_reg.py",
"chars": 13252,
"preview": "\"\"\"\nNeuSFacto w/ explicit point cloud regularization for scene decomposition.\n\"\"\"\nfrom __future__ import annotations\n\nfr"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/semantic_nerfw.py",
"chars": 13416,
"preview": "# Copyright 2022 The nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/tensorf.py",
"chars": 10813,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/unisurf.py",
"chars": 5043,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/vanilla_nerf.py",
"chars": 9746,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/models/volsdf.py",
"chars": 3044,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/pipelines/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/pipelines/base_pipeline.py",
"chars": 18679,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/pipelines/dynamic_batch.py",
"chars": 4217,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/process_data/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/process_data/colmap_utils.py",
"chars": 24629,
"preview": "\"\"\"\nHere we have modified code taken from COLMAP for parsing data in the COLMAP format.\nOriginal file at:\nhttps://github"
},
{
"path": "AutoReconForDens3R/nerfstudio/process_data/hloc_utils.py",
"chars": 3680,
"preview": "\"\"\"\nCode that uses the hierarchical localization toolbox (hloc)\nto extract and match image features, estimate camera pos"
},
{
"path": "AutoReconForDens3R/nerfstudio/process_data/insta360_utils.py",
"chars": 7402,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/process_data/metashape_utils.py",
"chars": 4747,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/process_data/polycam_utils.py",
"chars": 3602,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/process_data/process_data_utils.py",
"chars": 9495,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/process_data/record3d_utils.py",
"chars": 2994,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/py.typed",
"chars": 0,
"preview": ""
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/bilateral_solver.py",
"chars": 7362,
"preview": "import numpy as np\nfrom scipy import ndimage\nfrom scipy.sparse import diags\nfrom scipy.sparse.linalg import cg\nfrom scip"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/colormaps.py",
"chars": 3322,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/colors.py",
"chars": 1673,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/comms.py",
"chars": 2409,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/decorators.py",
"chars": 2462,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/eval_utils.py",
"chars": 4699,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/func_utils.py",
"chars": 91,
"preview": "\nfrom typing import Sequence\n\n\ndef get_first_element(elems: Sequence):\n return elems[0]\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/images.py",
"chars": 1996,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/install_checks.py",
"chars": 1478,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/io.py",
"chars": 1264,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/marching_cubes.py",
"chars": 8669,
"preview": "from pathlib import Path\nfrom rich import console\n\nimport numpy as np\nimport pymeshlab\nimport torch\nimport trimesh\nfrom "
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/mask_utils.py",
"chars": 1505,
"preview": "\nfrom typing import Dict, Any\n\nimport torch\nfrom torchtyping import TensorType\n\n\ndef alpha_composite(rgb0: torch.Tensor,"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/math.py",
"chars": 6811,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/misc.py",
"chars": 4865,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/plotly_utils.py",
"chars": 16180,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/pointclouds.py",
"chars": 6107,
"preview": "from copy import deepcopy\nfrom dataclasses import dataclass, fields\nfrom typing import Optional\n\nimport torch\nfrom rich."
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/poses.py",
"chars": 2621,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/printing.py",
"chars": 1573,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/profiler.py",
"chars": 3704,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/rich_utils.py",
"chars": 2315,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/scheduler.py",
"chars": 735,
"preview": "\"\"\"Scheduler for arbitrary weights\"\"\"\nimport math\nimport numpy as np\nfrom functools import lru_cache\n\n\ndef cosine_anneal"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/scripts.py",
"chars": 1543,
"preview": "\"\"\"Helpers for running script commands.\"\"\"\n# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/tensor_dataclass.py",
"chars": 13383,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/vis_utils.py",
"chars": 1453,
"preview": "\nimport cv2\nimport numpy as np\nimport torch\nfrom rich.console import Console\nfrom scipy.interpolate import interp1d\nfrom"
},
{
"path": "AutoReconForDens3R/nerfstudio/utils/writer.py",
"chars": 17033,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/.eslintrc.json",
"chars": 716,
"preview": "{\n \"env\": {\n \"browser\": true,\n \"es2021\": true\n },\n \"extends\": [\n \"eslint:recommended\",\n \"plugin:react/rec"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/.gitignore",
"chars": 179,
"preview": ".idea/\n.vscode/\nnode_modules/\nbuild/\n.DS_Store\n*.tgz\nmy-app*\ntemplate/src/__tests__/__snapshots__/\nlerna-debug.log\nnpm-d"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/package.json",
"chars": 2272,
"preview": "{\n \"name\": \"viewer\",\n \"homepage\": \".\",\n \"version\": \"22-12-02-0\",\n \"private\": true,\n \"dependencies\": {\n \"@emotion"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/public/electron.js",
"chars": 1207,
"preview": "const path = require('path');\n\nconst { app, BrowserWindow } = require('electron');\nconst isDev = require('electron-is-de"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/public/index.html",
"chars": 2839,
"preview": "<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <!-- Google tag (gtag.js) -->\n <script\n async\n src=\"https://w"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/public/manifest.json",
"chars": 185,
"preview": "{\n \"short_name\": \"nerfstudio viewer\",\n \"name\": \"Interactive NeRF viewer\",\n \"start_url\": \".\",\n \"display\": \"standalone"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/public/robots.txt",
"chars": 67,
"preview": "# https://www.robotstxt.org/robotstxt.html\nUser-agent: *\nDisallow:\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/requirements.txt",
"chars": 28,
"preview": "tyro>=0.3.22\nsshconf==0.2.5\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/run_deploy.py",
"chars": 4658,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/App.jsx",
"chars": 1292,
"preview": "import { CssBaseline, ThemeProvider } from '@mui/material';\nimport React from 'react';\nimport {\n SceneTreeWebSocketList"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/SceneNode.js",
"chars": 3456,
"preview": "/* eslint-disable no-underscore-dangle */\n/* eslint-disable no-restricted-syntax */\nimport * as THREE from 'three';\n\nfun"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/index.jsx",
"chars": 430,
"preview": "import './index.scss';\nimport React from 'react';\nimport ReactDOM from 'react-dom';\nimport { Provider } from 'react-redu"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/index.scss",
"chars": 6558,
"preview": "body {\n margin: 0;\n font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',\n 'Ubuntu', 'Can"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/Banner/Banner.jsx",
"chars": 2031,
"preview": "import React from 'react';\nimport { useDispatch } from 'react-redux';\n\nimport Button from '@mui/material/Button';\nimport"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/Banner/index.jsx",
"chars": 55,
"preview": "import Banner from './Banner';\n\nexport default Banner;\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/ConfigPanel/ConfigPanel.jsx",
"chars": 6425,
"preview": "import { buttonGroup, useControls } from 'leva';\nimport { useContext, useEffect, useState } from 'react';\nimport { useDi"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/ConfigPanel/ConfigPanelSlice.js",
"chars": 483,
"preview": "// The function below is called a selector and allows us to select a value from\n// the state. Selectors can also be defi"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/LandingModal/LandingModal.jsx",
"chars": 5435,
"preview": "/* eslint-disable react/jsx-props-no-spreading */\nimport * as React from 'react';\n\nimport {\n Box,\n Button,\n Modal,\n "
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/LandingModal/index.jsx",
"chars": 73,
"preview": "import LandingModel from './LandingModal';\n\nexport default LandingModel;\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/LogPanel/LogPanel.jsx",
"chars": 2190,
"preview": "import { useContext, useEffect } from 'react';\nimport { useDispatch, useSelector } from 'react-redux';\n\nimport { WebSock"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/RenderModal/RenderModal.jsx",
"chars": 2902,
"preview": "/* eslint-disable react/jsx-props-no-spreading */\nimport * as React from 'react';\n\nimport { Box, Button, Modal, TextFiel"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/RenderModal/index.jsx",
"chars": 70,
"preview": "import RenderModal from './RenderModal';\n\nexport default RenderModal;\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/Scene/Scene.jsx",
"chars": 12051,
"preview": "/* eslint-disable no-restricted-syntax */\nimport * as THREE from 'three';\n\nimport { useContext, useEffect } from 'react'"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/Scene/drawing.js",
"chars": 4558,
"preview": "/* eslint-disable no-restricted-syntax */\n// ---- code for drawing with three.js ----\nimport * as THREE from 'three';\n\ne"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/CameraPanel/CameraHelper.js",
"chars": 3361,
"preview": "import * as THREE from 'three';\nimport { MeshLine, MeshLineMaterial } from 'meshline';\n\n// eslint-disable-next-line no-u"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/CameraPanel/CameraPanel.jsx",
"chars": 41434,
"preview": "import * as React from 'react';\nimport * as THREE from 'three';\n\nimport {\n ArrowBackIosNew,\n ArrowForwardIos,\n AllInc"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/CameraPanel/curve.js",
"chars": 2539,
"preview": "// Code for creating a curve from a set of points\n\nimport * as THREE from 'three';\n\nfunction get_catmull_rom_curve(list_"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/CameraPanel/index.jsx",
"chars": 70,
"preview": "import CameraPanel from './CameraPanel';\n\nexport default CameraPanel;\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/ExportPanel/ExportPanel.jsx",
"chars": 4012,
"preview": "/* eslint-disable react/jsx-props-no-spreading */\nimport * as React from 'react';\nimport * as THREE from 'three';\nimport"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/ExportPanel/MeshSubPanel.jsx",
"chars": 7002,
"preview": "import * as React from 'react';\nimport { useControls, useStoreContext } from 'leva';\nimport { useEffect } from 'react';\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/ExportPanel/PointcloudSubPanel.jsx",
"chars": 4080,
"preview": "import * as React from 'react';\nimport { useControls, useStoreContext } from 'leva';\nimport { useEffect } from 'react';\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/ExportPanel/index.jsx",
"chars": 70,
"preview": "import ExportPanel from './ExportPanel';\n\nexport default ExportPanel;\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/ScenePanel/ScenePanel.jsx",
"chars": 6676,
"preview": "import * as React from 'react';\nimport * as THREE from 'three';\n\nimport { FaTractor } from 'react-icons/fa';\n\nimport { B"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/ScenePanel/index.jsx",
"chars": 67,
"preview": "import ScenePanel from './ScenePanel';\n\nexport default ScenePanel;\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/SidePanel.jsx",
"chars": 3932,
"preview": "/* eslint-disable react/jsx-props-no-spreading */\n\nimport * as React from 'react';\nimport * as THREE from 'three';\n\nimpo"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/StatusPanel/StatusPanel.jsx",
"chars": 5014,
"preview": "import * as React from 'react';\n\nimport Button from '@mui/material/Button';\nimport { ButtonGroup } from '@mui/material';"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/SidePanel/StatusPanel/index.jsx",
"chars": 70,
"preview": "import StatusPanel from './StatusPanel';\n\nexport default StatusPanel;\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/ViewerWindow/ViewerWindow.jsx",
"chars": 8078,
"preview": "import * as THREE from 'three';\n\nimport React, { useContext, useEffect, useRef } from 'react';\nimport { SelectChangeEven"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/ViewerWindow/ViewerWindowSlice.js",
"chars": 0,
"preview": ""
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/ViewportControlsModal/ViewportControlsModal.jsx",
"chars": 1344,
"preview": "/* eslint-disable react/jsx-props-no-spreading */\nimport * as React from 'react';\n\nimport { Box, Button, Modal } from '@"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/ViewportControlsModal/index.jsx",
"chars": 100,
"preview": "import ViewportControlsModal from './ViewportControlsModal';\n\nexport default ViewportControlsModal;\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/WebRtcWindow/WebRtcWindow.jsx",
"chars": 5307,
"preview": "import React, { createContext, useContext, useEffect, useRef } from 'react';\n\nimport { useDispatch } from 'react-redux';"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/WebSocket/WebSocket.jsx",
"chars": 1913,
"preview": "// Much of this code comes from or is inspired by:\n// https://www.pluralsight.com/guides/using-web-sockets-in-your-react"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/modules/WebSocketUrlField.jsx",
"chars": 1219,
"preview": "import * as React from 'react';\n\nimport { TextField, Link } from '@mui/material';\nimport { useDispatch, useSelector } fr"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/reducer.js",
"chars": 2658,
"preview": "import { split_path } from './utils';\n\nconst initialState = {\n // the websocket connection state\n websocketState: {\n "
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/setupTests.js",
"chars": 255,
"preview": "// jest-dom adds custom jest matchers for asserting on DOM nodes.\n// allows you to do things like:\n// expect(element).to"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/store.js",
"chars": 146,
"preview": "import { configureStore } from '@reduxjs/toolkit';\nimport rootReducer from './reducer';\n\nexport default configureStore({"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/subscriber.js",
"chars": 571,
"preview": "import { useContext } from 'react';\nimport { ReactReduxContext } from 'react-redux';\n\nexport function subscribe_to_chang"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/themes/leva_theme.json",
"chars": 1165,
"preview": "{\n \"colors\": {\n \"elevation1\": \"#292d39\",\n \"elevation2\": \"#222831\",\n \"elevation3\": \"#393E46\",\n \"accent1\": \"#"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/themes/theme.ts",
"chars": 3512,
"preview": "import { createTheme } from '@mui/material/styles';\n\nexport const appTheme = createTheme({\n palette: {\n primary: { m"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/app/src/utils.js",
"chars": 99,
"preview": "export function split_path(path_str) {\n return path_str.split('/').filter((x) => x.length > 0);\n}\n"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/server/README.md",
"chars": 1065,
"preview": "# Python Kernel and Client Viewer App communication\n\n> The purpose of this document is to explain how to communicate fro"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/server/__init__.py",
"chars": 605,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/server/path.py",
"chars": 1550,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/server/server.py",
"chars": 8633,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/server/state/node.py",
"chars": 1715,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/server/state/state_node.py",
"chars": 922,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
},
{
"path": "AutoReconForDens3R/nerfstudio/viewer/server/subprocess.py",
"chars": 3402,
"preview": "# Copyright 2022 The Nerfstudio Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"Lice"
}
]
// ... and 575 more files (download for full content)
About this extraction
This page presents the source code of the G-1nOnly/Dens3R GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). Note that the on-page view is a truncated preview; download the .txt file for the complete content. The full extraction includes 775 files (126.6 MB), approximately 9.1M tokens, and a symbol index with 3758 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub-repository-to-text converter for AI. Built by Nikandr Surkov.