Repository: MIT-SPARK/Loc-NeRF
Branch: main
Commit: 0d859be5e489
Files: 540
Total size: 120.5 KB
Directory structure:
gitextract_e4tgfkjc/
├── CMakeLists.txt
├── LICENSE
├── README.md
├── cfg/
│ ├── jackal.yaml
│ ├── llff.yaml
│ └── llff_global.yaml
├── launch/
│ └── navigate.launch
├── logs/
│ ├── backups/
│ │ ├── global_loc/
│ │ │ ├── gt_fern_15_poses.npy
│ │ │ ├── gt_fern_18_poses.npy
│ │ │ ├── gt_fern_2_poses.npy
│ │ │ ├── gt_fern_3_poses.npy
│ │ │ ├── gt_fern_7_poses.npy
│ │ │ ├── gt_fortress_11_poses.npy
│ │ │ ├── gt_fortress_16_poses.npy
│ │ │ ├── gt_fortress_19_poses.npy
│ │ │ ├── gt_fortress_1_poses.npy
│ │ │ ├── gt_fortress_9_poses.npy
│ │ │ ├── gt_horns_13_poses.npy
│ │ │ ├── gt_horns_1_poses.npy
│ │ │ ├── gt_horns_5_poses.npy
│ │ │ ├── gt_horns_6_poses.npy
│ │ │ ├── gt_horns_8_poses.npy
│ │ │ ├── gt_room_14_poses.npy
│ │ │ ├── gt_room_17_poses.npy
│ │ │ ├── gt_room_18_poses.npy
│ │ │ ├── gt_room_3_poses.npy
│ │ │ ├── gt_room_6_poses.npy
│ │ │ ├── initial_pose_fern_15_poses.npy
│ │ │ ├── initial_pose_fern_18_poses.npy
│ │ │ ├── initial_pose_fern_2_poses.npy
│ │ │ ├── initial_pose_fern_3_poses.npy
│ │ │ ├── initial_pose_fern_7_poses.npy
│ │ │ ├── initial_pose_fortress_11_poses.npy
│ │ │ ├── initial_pose_fortress_16_poses.npy
│ │ │ ├── initial_pose_fortress_19_poses.npy
│ │ │ ├── initial_pose_fortress_1_poses.npy
│ │ │ ├── initial_pose_fortress_9_poses.npy
│ │ │ ├── initial_pose_horns_13_poses.npy
│ │ │ ├── initial_pose_horns_1_poses.npy
│ │ │ ├── initial_pose_horns_5_poses.npy
│ │ │ ├── initial_pose_horns_6_poses.npy
│ │ │ ├── initial_pose_horns_8_poses.npy
│ │ │ ├── initial_pose_room_14_poses.npy
│ │ │ ├── initial_pose_room_17_poses.npy
│ │ │ ├── initial_pose_room_18_poses.npy
│ │ │ ├── initial_pose_room_3_poses.npy
│ │ │ ├── initial_pose_room_6_poses.npy
│ │ │ ├── mocnerf_fern_15_forward_passes.npy
│ │ │ ├── mocnerf_fern_15_poses.npy
│ │ │ ├── mocnerf_fern_18_forward_passes.npy
│ │ │ ├── mocnerf_fern_18_poses.npy
│ │ │ ├── mocnerf_fern_2_forward_passes.npy
│ │ │ ├── mocnerf_fern_2_poses.npy
│ │ │ ├── mocnerf_fern_3_forward_passes.npy
│ │ │ ├── mocnerf_fern_3_poses.npy
│ │ │ ├── mocnerf_fern_7_forward_passes.npy
│ │ │ ├── mocnerf_fern_7_poses.npy
│ │ │ ├── mocnerf_fortress_11_forward_passes.npy
│ │ │ ├── mocnerf_fortress_11_poses.npy
│ │ │ ├── mocnerf_fortress_16_forward_passes.npy
│ │ │ ├── mocnerf_fortress_16_poses.npy
│ │ │ ├── mocnerf_fortress_19_forward_passes.npy
│ │ │ ├── mocnerf_fortress_19_poses.npy
│ │ │ ├── mocnerf_fortress_1_forward_passes.npy
│ │ │ ├── mocnerf_fortress_1_poses.npy
│ │ │ ├── mocnerf_fortress_9_forward_passes.npy
│ │ │ ├── mocnerf_fortress_9_poses.npy
│ │ │ ├── mocnerf_horns_13_forward_passes.npy
│ │ │ ├── mocnerf_horns_13_poses.npy
│ │ │ ├── mocnerf_horns_1_forward_passes.npy
│ │ │ ├── mocnerf_horns_1_poses.npy
│ │ │ ├── mocnerf_horns_5_forward_passes.npy
│ │ │ ├── mocnerf_horns_5_poses.npy
│ │ │ ├── mocnerf_horns_6_forward_passes.npy
│ │ │ ├── mocnerf_horns_6_poses.npy
│ │ │ ├── mocnerf_horns_8_forward_passes.npy
│ │ │ ├── mocnerf_horns_8_poses.npy
│ │ │ ├── mocnerf_room_14_forward_passes.npy
│ │ │ ├── mocnerf_room_14_poses.npy
│ │ │ ├── mocnerf_room_17_forward_passes.npy
│ │ │ ├── mocnerf_room_17_poses.npy
│ │ │ ├── mocnerf_room_18_forward_passes.npy
│ │ │ ├── mocnerf_room_18_poses.npy
│ │ │ ├── mocnerf_room_3_forward_passes.npy
│ │ │ ├── mocnerf_room_3_poses.npy
│ │ │ ├── mocnerf_room_6_forward_passes.npy
│ │ │ ├── mocnerf_room_6_poses.npy
│ │ │ ├── mocnerf_started_fern_15_forward_passes.npy
│ │ │ ├── mocnerf_started_fern_15_poses.npy
│ │ │ ├── mocnerf_started_fern_18_forward_passes.npy
│ │ │ ├── mocnerf_started_fern_18_poses.npy
│ │ │ ├── mocnerf_started_fern_2_forward_passes.npy
│ │ │ ├── mocnerf_started_fern_2_poses.npy
│ │ │ ├── mocnerf_started_fern_3_forward_passes.npy
│ │ │ ├── mocnerf_started_fern_3_poses.npy
│ │ │ ├── mocnerf_started_fern_7_forward_passes.npy
│ │ │ ├── mocnerf_started_fern_7_poses.npy
│ │ │ ├── mocnerf_started_fortress_11_forward_passes.npy
│ │ │ ├── mocnerf_started_fortress_11_poses.npy
│ │ │ ├── mocnerf_started_fortress_16_forward_passes.npy
│ │ │ ├── mocnerf_started_fortress_16_poses.npy
│ │ │ ├── mocnerf_started_fortress_19_forward_passes.npy
│ │ │ ├── mocnerf_started_fortress_19_poses.npy
│ │ │ ├── mocnerf_started_fortress_1_forward_passes.npy
│ │ │ ├── mocnerf_started_fortress_1_poses.npy
│ │ │ ├── mocnerf_started_fortress_9_forward_passes.npy
│ │ │ ├── mocnerf_started_fortress_9_poses.npy
│ │ │ ├── mocnerf_started_horns_13_forward_passes.npy
│ │ │ ├── mocnerf_started_horns_13_poses.npy
│ │ │ ├── mocnerf_started_horns_1_forward_passes.npy
│ │ │ ├── mocnerf_started_horns_1_poses.npy
│ │ │ ├── mocnerf_started_horns_5_forward_passes.npy
│ │ │ ├── mocnerf_started_horns_5_poses.npy
│ │ │ ├── mocnerf_started_horns_6_forward_passes.npy
│ │ │ ├── mocnerf_started_horns_6_poses.npy
│ │ │ ├── mocnerf_started_horns_8_forward_passes.npy
│ │ │ ├── mocnerf_started_horns_8_poses.npy
│ │ │ ├── mocnerf_started_room_14_forward_passes.npy
│ │ │ ├── mocnerf_started_room_14_poses.npy
│ │ │ ├── mocnerf_started_room_17_forward_passes.npy
│ │ │ ├── mocnerf_started_room_17_poses.npy
│ │ │ ├── mocnerf_started_room_18_forward_passes.npy
│ │ │ ├── mocnerf_started_room_18_poses.npy
│ │ │ ├── mocnerf_started_room_3_forward_passes.npy
│ │ │ ├── mocnerf_started_room_3_poses.npy
│ │ │ ├── mocnerf_started_room_6_forward_passes.npy
│ │ │ └── mocnerf_started_room_6_poses.npy
│ │ └── inerf_compare/
│ │ ├── gt_fern_10_poses.npy
│ │ ├── gt_fern_12_poses.npy
│ │ ├── gt_fern_13_poses.npy
│ │ ├── gt_fern_18_poses.npy
│ │ ├── gt_fern_7_poses.npy
│ │ ├── gt_fortress_12_poses.npy
│ │ ├── gt_fortress_15_poses.npy
│ │ ├── gt_fortress_1_poses.npy
│ │ ├── gt_fortress_3_poses.npy
│ │ ├── gt_fortress_5_poses.npy
│ │ ├── gt_horns_0_poses.npy
│ │ ├── gt_horns_16_poses.npy
│ │ ├── gt_horns_17_poses.npy
│ │ ├── gt_horns_2_poses.npy
│ │ ├── gt_horns_7_poses.npy
│ │ ├── gt_room_16_poses.npy
│ │ ├── gt_room_3_poses.npy
│ │ ├── gt_room_6_poses.npy
│ │ ├── gt_room_7_poses.npy
│ │ ├── gt_room_8_poses.npy
│ │ ├── inerf_fern_10_poses.npy
│ │ ├── inerf_fern_12_poses.npy
│ │ ├── inerf_fern_13_poses.npy
│ │ ├── inerf_fern_18_poses.npy
│ │ ├── inerf_fern_7_poses.npy
│ │ ├── inerf_fortress_12_poses.npy
│ │ ├── inerf_fortress_15_poses.npy
│ │ ├── inerf_fortress_1_poses.npy
│ │ ├── inerf_fortress_3_poses.npy
│ │ ├── inerf_fortress_5_poses.npy
│ │ ├── inerf_horns_0_poses.npy
│ │ ├── inerf_horns_16_poses.npy
│ │ ├── inerf_horns_17_poses.npy
│ │ ├── inerf_horns_2_poses.npy
│ │ ├── inerf_horns_7_poses.npy
│ │ ├── inerf_room_16_poses.npy
│ │ ├── inerf_room_3_poses.npy
│ │ ├── inerf_room_6_poses.npy
│ │ ├── inerf_room_7_poses.npy
│ │ ├── inerf_room_8_poses.npy
│ │ ├── initial_pose_fern_10_poses.npy
│ │ ├── initial_pose_fern_12_poses.npy
│ │ ├── initial_pose_fern_13_poses.npy
│ │ ├── initial_pose_fern_18_poses.npy
│ │ ├── initial_pose_fern_7_poses.npy
│ │ ├── initial_pose_fortress_12_poses.npy
│ │ ├── initial_pose_fortress_15_poses.npy
│ │ ├── initial_pose_fortress_1_poses.npy
│ │ ├── initial_pose_fortress_3_poses.npy
│ │ ├── initial_pose_fortress_5_poses.npy
│ │ ├── initial_pose_horns_0_poses.npy
│ │ ├── initial_pose_horns_16_poses.npy
│ │ ├── initial_pose_horns_17_poses.npy
│ │ ├── initial_pose_horns_2_poses.npy
│ │ ├── initial_pose_horns_7_poses.npy
│ │ ├── initial_pose_room_16_poses.npy
│ │ ├── initial_pose_room_3_poses.npy
│ │ ├── initial_pose_room_6_poses.npy
│ │ ├── initial_pose_room_7_poses.npy
│ │ ├── initial_pose_room_8_poses.npy
│ │ ├── mocnerf_fern_10_forward_passes.npy
│ │ ├── mocnerf_fern_10_poses.npy
│ │ ├── mocnerf_fern_12_forward_passes.npy
│ │ ├── mocnerf_fern_12_poses.npy
│ │ ├── mocnerf_fern_13_forward_passes.npy
│ │ ├── mocnerf_fern_13_poses.npy
│ │ ├── mocnerf_fern_18_forward_passes.npy
│ │ ├── mocnerf_fern_18_poses.npy
│ │ ├── mocnerf_fern_7_forward_passes.npy
│ │ ├── mocnerf_fern_7_poses.npy
│ │ ├── mocnerf_fortress_12_forward_passes.npy
│ │ ├── mocnerf_fortress_12_poses.npy
│ │ ├── mocnerf_fortress_15_forward_passes.npy
│ │ ├── mocnerf_fortress_15_poses.npy
│ │ ├── mocnerf_fortress_1_forward_passes.npy
│ │ ├── mocnerf_fortress_1_poses.npy
│ │ ├── mocnerf_fortress_3_forward_passes.npy
│ │ ├── mocnerf_fortress_3_poses.npy
│ │ ├── mocnerf_fortress_5_forward_passes.npy
│ │ ├── mocnerf_fortress_5_poses.npy
│ │ ├── mocnerf_horns_0_forward_passes.npy
│ │ ├── mocnerf_horns_0_poses.npy
│ │ ├── mocnerf_horns_16_forward_passes.npy
│ │ ├── mocnerf_horns_16_poses.npy
│ │ ├── mocnerf_horns_17_forward_passes.npy
│ │ ├── mocnerf_horns_17_poses.npy
│ │ ├── mocnerf_horns_2_forward_passes.npy
│ │ ├── mocnerf_horns_2_poses.npy
│ │ ├── mocnerf_horns_7_forward_passes.npy
│ │ ├── mocnerf_horns_7_poses.npy
│ │ ├── mocnerf_room_16_forward_passes.npy
│ │ ├── mocnerf_room_16_poses.npy
│ │ ├── mocnerf_room_3_forward_passes.npy
│ │ ├── mocnerf_room_3_poses.npy
│ │ ├── mocnerf_room_6_forward_passes.npy
│ │ ├── mocnerf_room_6_poses.npy
│ │ ├── mocnerf_room_7_forward_passes.npy
│ │ ├── mocnerf_room_7_poses.npy
│ │ ├── mocnerf_room_8_forward_passes.npy
│ │ ├── mocnerf_room_8_poses.npy
│ │ ├── mocnerf_started_fern_10_forward_passes.npy
│ │ ├── mocnerf_started_fern_10_poses.npy
│ │ ├── mocnerf_started_fern_12_forward_passes.npy
│ │ ├── mocnerf_started_fern_12_poses.npy
│ │ ├── mocnerf_started_fern_13_forward_passes.npy
│ │ ├── mocnerf_started_fern_13_poses.npy
│ │ ├── mocnerf_started_fern_18_forward_passes.npy
│ │ ├── mocnerf_started_fern_18_poses.npy
│ │ ├── mocnerf_started_fern_7_forward_passes.npy
│ │ ├── mocnerf_started_fern_7_poses.npy
│ │ ├── mocnerf_started_fortress_12_forward_passes.npy
│ │ ├── mocnerf_started_fortress_12_poses.npy
│ │ ├── mocnerf_started_fortress_15_forward_passes.npy
│ │ ├── mocnerf_started_fortress_15_poses.npy
│ │ ├── mocnerf_started_fortress_1_forward_passes.npy
│ │ ├── mocnerf_started_fortress_1_poses.npy
│ │ ├── mocnerf_started_fortress_3_forward_passes.npy
│ │ ├── mocnerf_started_fortress_3_poses.npy
│ │ ├── mocnerf_started_fortress_5_forward_passes.npy
│ │ ├── mocnerf_started_fortress_5_poses.npy
│ │ ├── mocnerf_started_horns_0_forward_passes.npy
│ │ ├── mocnerf_started_horns_0_poses.npy
│ │ ├── mocnerf_started_horns_16_forward_passes.npy
│ │ ├── mocnerf_started_horns_16_poses.npy
│ │ ├── mocnerf_started_horns_17_forward_passes.npy
│ │ ├── mocnerf_started_horns_17_poses.npy
│ │ ├── mocnerf_started_horns_2_forward_passes.npy
│ │ ├── mocnerf_started_horns_2_poses.npy
│ │ ├── mocnerf_started_horns_7_forward_passes.npy
│ │ ├── mocnerf_started_horns_7_poses.npy
│ │ ├── mocnerf_started_room_16_forward_passes.npy
│ │ ├── mocnerf_started_room_16_poses.npy
│ │ ├── mocnerf_started_room_3_forward_passes.npy
│ │ ├── mocnerf_started_room_3_poses.npy
│ │ ├── mocnerf_started_room_6_forward_passes.npy
│ │ ├── mocnerf_started_room_6_poses.npy
│ │ ├── mocnerf_started_room_7_forward_passes.npy
│ │ ├── mocnerf_started_room_7_poses.npy
│ │ ├── mocnerf_started_room_8_forward_passes.npy
│ │ └── mocnerf_started_room_8_poses.npy
│ ├── global_loc/
│ │ ├── gt_fern_15_poses.npy
│ │ ├── gt_fern_18_poses.npy
│ │ ├── gt_fern_2_poses.npy
│ │ ├── gt_fern_3_poses.npy
│ │ ├── gt_fern_7_poses.npy
│ │ ├── gt_fortress_11_poses.npy
│ │ ├── gt_fortress_16_poses.npy
│ │ ├── gt_fortress_19_poses.npy
│ │ ├── gt_fortress_1_poses.npy
│ │ ├── gt_fortress_9_poses.npy
│ │ ├── gt_horns_13_poses.npy
│ │ ├── gt_horns_1_poses.npy
│ │ ├── gt_horns_5_poses.npy
│ │ ├── gt_horns_6_poses.npy
│ │ ├── gt_horns_8_poses.npy
│ │ ├── gt_room_14_poses.npy
│ │ ├── gt_room_17_poses.npy
│ │ ├── gt_room_18_poses.npy
│ │ ├── gt_room_3_poses.npy
│ │ ├── gt_room_6_poses.npy
│ │ ├── initial_pose_fern_15_poses.npy
│ │ ├── initial_pose_fern_18_poses.npy
│ │ ├── initial_pose_fern_2_poses.npy
│ │ ├── initial_pose_fern_3_poses.npy
│ │ ├── initial_pose_fern_7_poses.npy
│ │ ├── initial_pose_fortress_11_poses.npy
│ │ ├── initial_pose_fortress_16_poses.npy
│ │ ├── initial_pose_fortress_19_poses.npy
│ │ ├── initial_pose_fortress_1_poses.npy
│ │ ├── initial_pose_fortress_9_poses.npy
│ │ ├── initial_pose_horns_13_poses.npy
│ │ ├── initial_pose_horns_1_poses.npy
│ │ ├── initial_pose_horns_5_poses.npy
│ │ ├── initial_pose_horns_6_poses.npy
│ │ ├── initial_pose_horns_8_poses.npy
│ │ ├── initial_pose_room_14_poses.npy
│ │ ├── initial_pose_room_17_poses.npy
│ │ ├── initial_pose_room_18_poses.npy
│ │ ├── initial_pose_room_3_poses.npy
│ │ ├── initial_pose_room_6_poses.npy
│ │ ├── mocnerf_fern_15_forward_passes.npy
│ │ ├── mocnerf_fern_15_poses.npy
│ │ ├── mocnerf_fern_18_forward_passes.npy
│ │ ├── mocnerf_fern_18_poses.npy
│ │ ├── mocnerf_fern_2_forward_passes.npy
│ │ ├── mocnerf_fern_2_poses.npy
│ │ ├── mocnerf_fern_3_forward_passes.npy
│ │ ├── mocnerf_fern_3_poses.npy
│ │ ├── mocnerf_fern_7_forward_passes.npy
│ │ ├── mocnerf_fern_7_poses.npy
│ │ ├── mocnerf_fortress_11_forward_passes.npy
│ │ ├── mocnerf_fortress_11_poses.npy
│ │ ├── mocnerf_fortress_16_forward_passes.npy
│ │ ├── mocnerf_fortress_16_poses.npy
│ │ ├── mocnerf_fortress_19_forward_passes.npy
│ │ ├── mocnerf_fortress_19_poses.npy
│ │ ├── mocnerf_fortress_1_forward_passes.npy
│ │ ├── mocnerf_fortress_1_poses.npy
│ │ ├── mocnerf_fortress_9_forward_passes.npy
│ │ ├── mocnerf_fortress_9_poses.npy
│ │ ├── mocnerf_horns_13_forward_passes.npy
│ │ ├── mocnerf_horns_13_poses.npy
│ │ ├── mocnerf_horns_1_forward_passes.npy
│ │ ├── mocnerf_horns_1_poses.npy
│ │ ├── mocnerf_horns_5_forward_passes.npy
│ │ ├── mocnerf_horns_5_poses.npy
│ │ ├── mocnerf_horns_6_forward_passes.npy
│ │ ├── mocnerf_horns_6_poses.npy
│ │ ├── mocnerf_horns_8_forward_passes.npy
│ │ ├── mocnerf_horns_8_poses.npy
│ │ ├── mocnerf_room_14_forward_passes.npy
│ │ ├── mocnerf_room_14_poses.npy
│ │ ├── mocnerf_room_17_forward_passes.npy
│ │ ├── mocnerf_room_17_poses.npy
│ │ ├── mocnerf_room_18_forward_passes.npy
│ │ ├── mocnerf_room_18_poses.npy
│ │ ├── mocnerf_room_3_forward_passes.npy
│ │ ├── mocnerf_room_3_poses.npy
│ │ ├── mocnerf_room_6_forward_passes.npy
│ │ ├── mocnerf_room_6_poses.npy
│ │ ├── mocnerf_started_fern_15_forward_passes.npy
│ │ ├── mocnerf_started_fern_15_poses.npy
│ │ ├── mocnerf_started_fern_18_forward_passes.npy
│ │ ├── mocnerf_started_fern_18_poses.npy
│ │ ├── mocnerf_started_fern_2_forward_passes.npy
│ │ ├── mocnerf_started_fern_2_poses.npy
│ │ ├── mocnerf_started_fern_3_forward_passes.npy
│ │ ├── mocnerf_started_fern_3_poses.npy
│ │ ├── mocnerf_started_fern_7_forward_passes.npy
│ │ ├── mocnerf_started_fern_7_poses.npy
│ │ ├── mocnerf_started_fortress_11_forward_passes.npy
│ │ ├── mocnerf_started_fortress_11_poses.npy
│ │ ├── mocnerf_started_fortress_16_forward_passes.npy
│ │ ├── mocnerf_started_fortress_16_poses.npy
│ │ ├── mocnerf_started_fortress_19_forward_passes.npy
│ │ ├── mocnerf_started_fortress_19_poses.npy
│ │ ├── mocnerf_started_fortress_1_forward_passes.npy
│ │ ├── mocnerf_started_fortress_1_poses.npy
│ │ ├── mocnerf_started_fortress_9_forward_passes.npy
│ │ ├── mocnerf_started_fortress_9_poses.npy
│ │ ├── mocnerf_started_horns_13_forward_passes.npy
│ │ ├── mocnerf_started_horns_13_poses.npy
│ │ ├── mocnerf_started_horns_1_forward_passes.npy
│ │ ├── mocnerf_started_horns_1_poses.npy
│ │ ├── mocnerf_started_horns_5_forward_passes.npy
│ │ ├── mocnerf_started_horns_5_poses.npy
│ │ ├── mocnerf_started_horns_6_forward_passes.npy
│ │ ├── mocnerf_started_horns_6_poses.npy
│ │ ├── mocnerf_started_horns_8_forward_passes.npy
│ │ ├── mocnerf_started_horns_8_poses.npy
│ │ ├── mocnerf_started_room_14_forward_passes.npy
│ │ ├── mocnerf_started_room_14_poses.npy
│ │ ├── mocnerf_started_room_17_forward_passes.npy
│ │ ├── mocnerf_started_room_17_poses.npy
│ │ ├── mocnerf_started_room_18_forward_passes.npy
│ │ ├── mocnerf_started_room_18_poses.npy
│ │ ├── mocnerf_started_room_3_forward_passes.npy
│ │ ├── mocnerf_started_room_3_poses.npy
│ │ ├── mocnerf_started_room_6_forward_passes.npy
│ │ └── mocnerf_started_room_6_poses.npy
│ └── inerf_compare/
│ ├── gt_fern_10_poses.npy
│ ├── gt_fern_12_poses.npy
│ ├── gt_fern_13_poses.npy
│ ├── gt_fern_18_poses.npy
│ ├── gt_fern_7_poses.npy
│ ├── gt_fortress_12_poses.npy
│ ├── gt_fortress_15_poses.npy
│ ├── gt_fortress_1_poses.npy
│ ├── gt_fortress_3_poses.npy
│ ├── gt_fortress_5_poses.npy
│ ├── gt_horns_0_poses.npy
│ ├── gt_horns_16_poses.npy
│ ├── gt_horns_17_poses.npy
│ ├── gt_horns_2_poses.npy
│ ├── gt_horns_7_poses.npy
│ ├── gt_room_16_poses.npy
│ ├── gt_room_3_poses.npy
│ ├── gt_room_6_poses.npy
│ ├── gt_room_7_poses.npy
│ ├── gt_room_8_poses.npy
│ ├── inerf_fern_10_poses.npy
│ ├── inerf_fern_12_poses.npy
│ ├── inerf_fern_13_poses.npy
│ ├── inerf_fern_18_poses.npy
│ ├── inerf_fern_7_poses.npy
│ ├── inerf_fortress_12_poses.npy
│ ├── inerf_fortress_15_poses.npy
│ ├── inerf_fortress_1_poses.npy
│ ├── inerf_fortress_3_poses.npy
│ ├── inerf_fortress_5_poses.npy
│ ├── inerf_horns_0_poses.npy
│ ├── inerf_horns_16_poses.npy
│ ├── inerf_horns_17_poses.npy
│ ├── inerf_horns_2_poses.npy
│ ├── inerf_horns_7_poses.npy
│ ├── inerf_room_16_poses.npy
│ ├── inerf_room_3_poses.npy
│ ├── inerf_room_6_poses.npy
│ ├── inerf_room_7_poses.npy
│ ├── inerf_room_8_poses.npy
│ ├── initial_pose_fern_10_poses.npy
│ ├── initial_pose_fern_12_poses.npy
│ ├── initial_pose_fern_13_poses.npy
│ ├── initial_pose_fern_18_poses.npy
│ ├── initial_pose_fern_7_poses.npy
│ ├── initial_pose_fortress_12_poses.npy
│ ├── initial_pose_fortress_15_poses.npy
│ ├── initial_pose_fortress_1_poses.npy
│ ├── initial_pose_fortress_3_poses.npy
│ ├── initial_pose_fortress_5_poses.npy
│ ├── initial_pose_horns_0_poses.npy
│ ├── initial_pose_horns_16_poses.npy
│ ├── initial_pose_horns_17_poses.npy
│ ├── initial_pose_horns_2_poses.npy
│ ├── initial_pose_horns_7_poses.npy
│ ├── initial_pose_room_16_poses.npy
│ ├── initial_pose_room_3_poses.npy
│ ├── initial_pose_room_6_poses.npy
│ ├── initial_pose_room_7_poses.npy
│ ├── initial_pose_room_8_poses.npy
│ ├── mocnerf_fern_10_forward_passes.npy
│ ├── mocnerf_fern_10_poses.npy
│ ├── mocnerf_fern_12_forward_passes.npy
│ ├── mocnerf_fern_12_poses.npy
│ ├── mocnerf_fern_13_forward_passes.npy
│ ├── mocnerf_fern_13_poses.npy
│ ├── mocnerf_fern_18_forward_passes.npy
│ ├── mocnerf_fern_18_poses.npy
│ ├── mocnerf_fern_7_forward_passes.npy
│ ├── mocnerf_fern_7_poses.npy
│ ├── mocnerf_fortress_12_forward_passes.npy
│ ├── mocnerf_fortress_12_poses.npy
│ ├── mocnerf_fortress_15_forward_passes.npy
│ ├── mocnerf_fortress_15_poses.npy
│ ├── mocnerf_fortress_1_forward_passes.npy
│ ├── mocnerf_fortress_1_poses.npy
│ ├── mocnerf_fortress_3_forward_passes.npy
│ ├── mocnerf_fortress_3_poses.npy
│ ├── mocnerf_fortress_5_forward_passes.npy
│ ├── mocnerf_fortress_5_poses.npy
│ ├── mocnerf_horns_0_forward_passes.npy
│ ├── mocnerf_horns_0_poses.npy
│ ├── mocnerf_horns_16_forward_passes.npy
│ ├── mocnerf_horns_16_poses.npy
│ ├── mocnerf_horns_17_forward_passes.npy
│ ├── mocnerf_horns_17_poses.npy
│ ├── mocnerf_horns_2_forward_passes.npy
│ ├── mocnerf_horns_2_poses.npy
│ ├── mocnerf_horns_7_forward_passes.npy
│ ├── mocnerf_horns_7_poses.npy
│ ├── mocnerf_room_16_forward_passes.npy
│ ├── mocnerf_room_16_poses.npy
│ ├── mocnerf_room_3_forward_passes.npy
│ ├── mocnerf_room_3_poses.npy
│ ├── mocnerf_room_6_forward_passes.npy
│ ├── mocnerf_room_6_poses.npy
│ ├── mocnerf_room_7_forward_passes.npy
│ ├── mocnerf_room_7_poses.npy
│ ├── mocnerf_room_8_forward_passes.npy
│ ├── mocnerf_room_8_poses.npy
│ ├── mocnerf_started_fern_10_forward_passes.npy
│ ├── mocnerf_started_fern_10_poses.npy
│ ├── mocnerf_started_fern_12_forward_passes.npy
│ ├── mocnerf_started_fern_12_poses.npy
│ ├── mocnerf_started_fern_13_forward_passes.npy
│ ├── mocnerf_started_fern_13_poses.npy
│ ├── mocnerf_started_fern_18_forward_passes.npy
│ ├── mocnerf_started_fern_18_poses.npy
│ ├── mocnerf_started_fern_7_forward_passes.npy
│ ├── mocnerf_started_fern_7_poses.npy
│ ├── mocnerf_started_fortress_12_forward_passes.npy
│ ├── mocnerf_started_fortress_12_poses.npy
│ ├── mocnerf_started_fortress_15_forward_passes.npy
│ ├── mocnerf_started_fortress_15_poses.npy
│ ├── mocnerf_started_fortress_1_forward_passes.npy
│ ├── mocnerf_started_fortress_1_poses.npy
│ ├── mocnerf_started_fortress_3_forward_passes.npy
│ ├── mocnerf_started_fortress_3_poses.npy
│ ├── mocnerf_started_fortress_5_forward_passes.npy
│ ├── mocnerf_started_fortress_5_poses.npy
│ ├── mocnerf_started_horns_0_forward_passes.npy
│ ├── mocnerf_started_horns_0_poses.npy
│ ├── mocnerf_started_horns_16_forward_passes.npy
│ ├── mocnerf_started_horns_16_poses.npy
│ ├── mocnerf_started_horns_17_forward_passes.npy
│ ├── mocnerf_started_horns_17_poses.npy
│ ├── mocnerf_started_horns_2_forward_passes.npy
│ ├── mocnerf_started_horns_2_poses.npy
│ ├── mocnerf_started_horns_7_forward_passes.npy
│ ├── mocnerf_started_horns_7_poses.npy
│ ├── mocnerf_started_room_16_forward_passes.npy
│ ├── mocnerf_started_room_16_poses.npy
│ ├── mocnerf_started_room_3_forward_passes.npy
│ ├── mocnerf_started_room_3_poses.npy
│ ├── mocnerf_started_room_6_forward_passes.npy
│ ├── mocnerf_started_room_6_poses.npy
│ ├── mocnerf_started_room_7_forward_passes.npy
│ ├── mocnerf_started_room_7_poses.npy
│ ├── mocnerf_started_room_8_forward_passes.npy
│ └── mocnerf_started_room_8_poses.npy
├── package.xml
├── requirements.txt
├── rviz/
│ └── rviz.rviz
├── setup.py
├── src/
│ ├── full_filter.py
│ ├── full_nerf_helpers.py
│ ├── nav_node.py
│ ├── navigator_base.py
│ ├── particle_filter.py
│ ├── render_helpers.py
│ └── utils.py
└── tools/
├── eval_logs.py
└── ros_to_jpg.py
================================================
FILE CONTENTS
================================================
================================================
FILE: CMakeLists.txt
================================================
# Build configuration for the Loc-NeRF catkin (ROS) package.
# The package is pure Python; CMake only wires it into the catkin workspace.
cmake_minimum_required(VERSION 2.8.3)
project(locnerf)
# ROS communication dependencies used by the Python nodes at runtime.
find_package(catkin REQUIRED COMPONENTS
geometry_msgs
sensor_msgs
std_msgs
rospy
)
# Uncomment to point at a locally-built cv_bridge (e.g. for Python 3 builds).
#set(cv_bridge_DIR /usr/local/share/cv_bridge/cmake)
# Install the Python package declared in setup.py (modules under src/).
catkin_python_setup()
catkin_package()
include_directories(
${catkin_INCLUDE_DIRS}
)
# Install the main entry-point node so it is runnable via rosrun/roslaunch.
catkin_install_python(PROGRAMS src/nav_node.py
DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
)
================================================
FILE: LICENSE
================================================
MIT License
Copyright (c) 2022 Massachusetts Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: README.md
================================================
# Loc-NeRF
Monte Carlo Localization using Neural Radiance Fields.
## Coordinate Frames
To be consistent throughout the code and in the yaml files, we define coordinates using the camera frame commonly used for NeRF (x right, y up, z inward from the perspective of the camera) unless stated otherwise. Coordinates are FROM Camera TO World unless otherwise stated. Note this is not the same as the more common camera frame used in robotics (x right, y down, z outward).
## Publications
If you find this code relevant for your work, please consider citing our paper:
[Loc-NeRF: Monte Carlo Localization using Neural Radiance Fields](https://arxiv.org/abs/2209.09050)
This work was done in collaboration with MIT and Draper Labs and was partially funded
by the NASA Flight Opportunities under grant Nos 80NSSC21K0348, ARL DCIST CRA W911NF-17-2-0181, and an Amazon Research Award.
# 1. Installation
## A. Prerequisites
- Install ROS by following [ROS website](http://wiki.ros.org/ROS/Installation).
- If you want to use VIO for the predict step for a real robot demo, install a VIO such as
[VINS-Fusion](https://github.com/HKUST-Aerial-Robotics/VINS-Fusion) or [Kimera](https://github.com/MIT-SPARK/Kimera-VIO-ROS).
# 2. Loc-NeRF installation
```bash
# Setup catkin workspace
mkdir -p ~/catkin_ws/src
cd ~/catkin_ws/
catkin init
# Clone repo
cd ~/catkin_ws/src
git clone https://github.com/MIT-SPARK/Loc-NeRF
# Compile code
catkin build
# source workspace
source ~/catkin_ws/devel/setup.bash
#install dependencies:
cd ~/catkin_ws/src/Loc-NeRF
pip install -r requirements.txt
```
## Starting Loc-NeRF
We will use ROS and rviz as a structure for running Loc-NeRF and for visualizing performance.
As a general good practice, remember to source your workspace for each terminal you use.
1. Open a new terminal and run: `roscore`
2. In another terminal, launch Loc-NeRF:
```bash
roslaunch locnerf navigate.launch parameter_file:=<param_file.yaml>
```
3. In another terminal, launch rviz for visualization:
```bash
rviz -d $(rospack find locnerf)/rviz/rviz.rviz
```
4. If you are not running with a rosbag, i.e. you are using LLFF data, then Loc-NeRF should start and you should be set. If you are using a rosbag, continue to the next steps.
5. In another terminal launch VIO
6. Finally, in another terminal, play your rosbag:
```bash
rosbag play /PATH/TO/ROSBAG
```
## Provided config files
We provide three yaml files in /cfg to get you started.
```jackal.yaml``` is set up to run a real-time demo. The predict step runs at the rate of available prediction poses (for which we used VIO). The update step processes incoming images as fast as computation limits will allow (in our case about 2.5 Hz) and discards all other images to prevent large latency.
```llff.yaml``` runs Loc-NeRF on the LLFF dataset as described in our paper.
```llff_global.yaml``` runs Loc-NeRF on the LLFF dataset with a wider spread of particles to test the ability to perform global localization as described in our paper.
# 3. Usage
Currently we provide example code to run Loc-NeRF on two types of data: running on LLFF data to compare with [iNeRF](https://github.com/salykovaa/inerf), and running a real-time
demo with custom data. For both we use [NeRF-Pytorch](https://github.com/yenchenlin/nerf-pytorch) as our NeRF map.
The fastest way to start running Loc-NeRF is to download LLFF data with pre-trained NeRF weights. We also provide instructions for running a demo on a real robot, which requires training a NeRF with metric scaled poses.
## Using LLFF data
Download LLFF images and pretrained NeRF-Pytorch weights from [NeRF-Pytorch](https://github.com/yenchenlin/nerf-pytorch). If you download our fork of iNeRF here:
[iNeRF](https://github.com/Dominic101/inerf) then the configs and ckpts folder will already be setup correctly with the pre-trained weights, and you just need to add the data folder from NeRF-Pytorch.
Place data using the following structure:
```
├── configs
│ ├── ...
├── ckpts
│ │ ├── fern
| | | └── fern.tar
│ │ ├── fortress
| | | └── fortress.tar
│ │ ├── horns
| | | └── horns.tar
│ │ ├── room
| | | └── room.tar
| | └── ...
├── data
│ ├── nerf_llff_data
│ │ └── fern # downloaded llff dataset
│ │ └── fortress # downloaded llff dataset
│ │ └── horns # downloaded llff dataset
| | └── room # downloaded llff dataset
| | └── ...
```
After updating your yaml file ```llff.yaml``` with the directory where you placed the data and any other params you want to change, you are ready to
run Loc-NeRF! By default, Loc-NeRF will estimate the camera pose of 5 random images from each of fern, fortress, horns, room (20 images in total). You can use rviz to provide real-time visualization of the ground truth pose and the particles.
### Plotting results
If you log results from Loc-NeRF, we provide code to plot the position and rotation error inside ```tools/eval_logs.py```. The script also contains
code to automatically run iNeRF using the same initial conditions that Loc-NeRF used and plot the results of iNeRF.
If you don't want to run iNeRF, running the plotting code is as easy as changing a few parameters at the top of ```tools/eval_logs.py``` and then running
```python3 tools/eval_logs.py```. Note that ```run_inerf``` should be set to False.
#### Automatically generating iNeRF - Loc-NeRF comparison
Install our fork of [iNeRF](https://github.com/Dominic101/inerf) which is set up to interface with our automatic comparison script.
To automatically run iNeRF after you have logged data from Loc-NeRF (because we need to know the initial start for iNeRF), run the following:
``` python3 tools/eval_logs.py --config /home/dominic/inerf/configs/fern.txt --data_dir /home/dominic/inerf/data/nerf_llff_data/ --ckpt_dir /home/dominic/inerf/ckpts/ --N_importance 64 --batch_size 2048```. Note that ```run_inerf``` should be set to True. This will both run iNeRF and log the results so you don't need to rerun iNeRF. Note that to make the interface to iNeRF easier, we directly pass a specific config in the cmd line (in this case fern.txt), but setting N_importance and batch_size directly, along with the provided scripts, ensure that the correct params are used for each LLFF dataset.
## Using Custom data for real-time experiment
To run Loc-NeRF on your robot or simulator, you will first need to train a NeRF with metric scaled poses. This is important so that a movement estimated
in the world by the predict step roughly corresponds to a movement inside the NeRF. There are several options for training a NeRF, but for ours we
used https://colmap.github.io/faq.html#geo-registration. We first estimate an unscaled trajectory with Colmap and then
provide Colmap with a small subset of scaled poses that it uses to scale the entire trajectory.
Our procedure is as follows:
1. Record a rosbag with RGB images (for NeRF) along with stereo images and IMU (for VIO).
2. use the provided script ```tools/ros_to_jpg.py``` to convert a downsampled set of images from the rosbag into images for Colmap
3. Use the imgs2poses.py script provided at [LLFF](https://github.com/fyusion/llff) to generate Colmap poses for the trajectory up to scale.
4. You should now have a set of poses up to scale. To add scale, you will need to determine the metric scaled pose of a small subset of images (we used five images) and use the provided Colmap function at
[colmap-add-scale](https://colmap.github.io/faq.html#geo-registration) to generate optimized metric scaled poses for your dataset.
5. Use our fork of NeRF-Pytorch at [train-nerf](https://github.com/Dominic101/nerf-pytorch) to train a NeRF with metric scaled poses.
6. Now you are all set to run Loc-NeRF with a rosbag for a real-time demo.
# 4. FAQ
This code is under active development with more updates coming soon. Below are some known common issues/TODOs. We also welcome PRs and feedback of any encountered issues to keep improving this code for the community:
1. We use ROS as a way to provide real-time visualization with rviz and as a structure to run on real robot platforms. For some users, a reliance on ROS may be an unnecessary requirement and future work is to separate Loc-NeRF from a ROS wrapper.
# Third-party code:
Parts of this code were based on [this pytorch implementation of iNeRF](https://github.com/salykovaa/inerf) and [NeRF-Pytorch](https://github.com/yenchenlin/nerf-pytorch).
```
NeRF-Pytorch:
@misc{lin2020nerfpytorch,
title={NeRF-pytorch},
author={Yen-Chen, Lin},
publisher = {GitHub},
journal = {GitHub repository},
howpublished={\url{https://github.com/yenchenlin/nerf-pytorch/}},
year={2020}
}
```
================================================
FILE: cfg/jackal.yaml
================================================
#### main filtering flags ####
run_predicts: True # Use odometry for predict step, otherwise use 0 mean gaussian noise.
use_received_image: True # Update the sensor image.
center_about_true_pose: False # Use gt orientation as the mean pose to generate an initial start pose - if False use origin.
use_weighted_avg: False # Use weighted average to compute position estimate, otherwise use simple average.
no_ndc: True # note that custom data cannot use ndc but LLFF should
#### main update step params ####
factor: 4 # image down-sample factor
course_samples: 64 # number course samples per ray
fine_samples: 64 # number fine samples per ray
photometric_loss: rgb # rgb
sampling_strategy: random # only random is supported right now
batch_size: 32 # number of pixels to use for measurement points
num_particles: 400 # Number of initial particles.
#### dataset and weight loading ####
dataset_type: custom # llff, custom
model_name: nerf_hallway_low
ckpt_dir: /home/dominic/nerf-pytorch/logs
data_dir: /home/dominic/inerf/data/nerf_llff_data # only needed for LLFF data
#### convergence protection #####
# randomly sample a few particle positions to protect against false convergence
# TODO eventually want to just remove the lowest weight particles instead of random particles
use_convergence_protection: True
number_convergence_particles: 15 # number of particles to distribute
convergence_noise: 0.2 # meters, uniformly sample this far from mean for each axis.
#### inerf comparison ####
run_inerf_compare: False
global_loc_mode: False # Perform global localization with Loc-NeRF.
log_results: False # Save pose estimate for each iteration. Also save a file with gt pose and the initial pose guess if use_logged_start is False.
use_logged_start: True # use saved logs for initial pose
log_prefix: use_annealing # add a prefix to the log file names for convenience to keep track of different runs
log_directory: /home/dominic/mocNeRF_ws/src/lonerf/logs/inerf_compare # Directory to save npy files.
forward_passes_limit: 78643200 # end optimization after this number of forward passes have run
#### custom data realtime tracking mode params ####
# Note these values are all overwritten/not used if run_inerf_compare is True
# subscriber topics
rgb_topic: /camera_d455/color/image_raw
vio_topic: /vins_estimator/odometry
focal: 635
H: 720
W: 1280
# Near and far bounds of the NeRF scene, should be similar to how the NeRF was trained.
near: 0
far: 8
# set distribution of particles, units are meters and degrees
min_bounds:
px: -0.5
py: -0.3
pz: 0.0
rz: -2.5
ry: -179.0
rx: -2.5
max_bounds:
px: 0.5
py: 0.2
pz: 3.5
rz: 2.5
ry: 179.0
rx: 2.5
# rotation from the body frame of VIO to the NeRF camera frame (x right, y up, z inward). all values in radians
R_bodyVins_camNerf:
rz: 0.0
ry: 0.0
rx: 3.14159256
#### Nerf Navigation comparision ####
# uses user set distribution of particles
run_nerfnav_compare: False
nerf_nav_directory : /home/dominic/Downloads/input_data1
#### particle annealing params ####
use_refining: True # Decrease position noise as we start to converge.
use_particle_reduction: True # Reduce number of particles as they start to converge
min_number_particles: 150 # reduce particles to this amount during particle reduction
# position standard deviation thresholds to activate particle annealing
alpha_refine: 0.18
alpha_super_refine: 0.03
##### motion model noise params ####
# position noise
px_noise: 0.002
py_noise: 0.002
pz_noise: 0.003
# rotation noise
rot_x_noise: 0.002
rot_y_noise: 0.002
rot_z_noise: 0.002
##### visualization params #####
visualize_particles: True
view_debug_image_iteration: 20 # view NeRF rendered image at estimated pose after number of iterations (set to 0 to disable)
####### DO NOT CHANGE THESE UNLESS YOU KNOW WHAT YOU ARE DOING ########
# These are params from iNeRF and NeRF-Pytorch
# In general they should be the same as how the NeRF model was changed
multires: 10 # log2 of max freq for positional encoding (3D location)
multires_views: 4 # log2 of max freq for positional encoding (2D direction)
i_embed: 0 # set 0 for default positional encoding, -1 for none
netdepth: 8 # layers in network
netwidth: 256 # channels per layer
netdepth_fine: 8 # layers in fine network
netwidth_fine: 256 # channels per layer in fine network
use_viewdirs: True
perturb: 0 # set to 0. for no jitter, 1. for jitter
white_bkgd: False # set to render synthetic data on a white bkgd (always use for dvoxels)
raw_noise_std: 1.0 # std dev of noise added to regularize sigma_a output, 1e0 recommended
lindisp: False # sampling linearly in disparity rather than depth
chunk: 65536 # 1024x64 # number of rays processed in parallel, decrease if running out of memory
netchunk: 65536 # 1024x64 # number of pts sent through network in parallel, decrease if running out of memory
bd_factor: 0.75
kernel_size: 3
lrate: 0.01
dil_iter: 3 # only used with interest_regions sampling
================================================
FILE: cfg/llff.yaml
================================================
#### filtering flags ####
run_predicts: False # Use odometry for predict step, otherwise use 0 mean gaussian noise.
use_received_image: False # Update the sensor image.
center_about_true_pose: True # Use gt orientation as the mean pose to generate an initial start pose - if False use origin.
use_weighted_avg: True # Use weighted average to compute position estimate, otherwise use simple average.
no_ndc: False # note that custom data cannot use ndc but LLFF should
#### main update step params ####
factor: 4 # image down-sample factor
course_samples: 64 # number course samples per ray
fine_samples: 64 # number fine samples per ray
photometric_loss: rgb # rgb
sampling_strategy: random # only random is supported right now
batch_size: 64 # number of pixels to use for measurement points
num_particles: 300 # Number of initial particles.
#### dataset and weight loading ####
dataset_type: llff # llff, custom
ckpt_dir: /home/dominic/inerf/ckpts
data_dir: /home/dominic/inerf/data/nerf_llff_data # only needed for LLFF data
#### convergence protection #####
# randomly sample a few particle positions to protect against false convergence
# TODO eventually want to just remove the lowest weight particles instead of random particles
use_convergence_protection: True
number_convergence_particles: 10 # number of particles to distribute
convergence_noise: 0.2 # meters, uniformly sample this far from mean for each axis.
#### inerf comparison ####
run_inerf_compare: True
global_loc_mode: False # Perform global localization with Loc-NeRF.
log_results: False # Save pose estimate for each iteration. Also save a file with gt pose and the initial pose guess if use_logged_start is False.
use_logged_start: True # use saved logs for initial pose
log_prefix: use_annealing # add a prefix to the log file names for convenience to keep track of different runs
log_directory: /home/dominic/mocNeRF_ws/src/locnerf/logs/inerf_compare # Directory to save npy files.
forward_passes_limit: 78643200 # end optimization after this number of forward passes have run
#### custom data realtime tracking mode params ####
# Note these values are all overwritten/not used if run_inerf_compare is True
# subscriber topics
rgb_topic: /camera/color/image_raw
vio_topic: /vins_estimator/odometry
focal: 635
H: 720
W: 1280
# Near and far bounds of the NeRF scene, should be similar to how the NeRF was trained.
near: 0
far: 8
# set distribution of particles, units are meters and degrees
min_bounds:
px: -0.5
py: -0.3
pz: 0.0
rz: -2.5
ry: -179.0
rx: -2.5
max_bounds:
px: 0.5
py: 0.2
pz: 3.5
rz: 2.5
ry: 179.0
rx: 2.5
# rotation from the body frame of VIO to the NeRF camera frame (x right, y up, z inward). all values in radians
R_bodyVins_camNerf:
rz: 0.0
ry: 0.0
rx: 3.14159256
#### Nerf Navigation comparision ####
# uses user set distribution of particles
run_nerfnav_compare: False
nerf_nav_directory : /home/dominic/Downloads/input_data1
#### particle annealing params ####
use_refining: True # Decrease position noise as we start to converge.
use_particle_reduction: True # Reduce number of particles as they start to converge
min_number_particles: 100 # reduce particles to this amount during particle reduction
# position standard deviation thresholds to activate particle annealing
alpha_refine: 0.08
alpha_super_refine: 0.03
#### motion model noise params ####
# position noise
px_noise: 0.01
py_noise: 0.01
pz_noise: 0.01
# rotation noise
rot_x_noise: 0.02
rot_y_noise: 0.02
rot_z_noise: 0.02
#### visualization params ####
visualize_particles: True # publish particles for rviz
view_debug_image_iteration: 0 # view NeRF rendered image at estimated pose after number of iterations (set to 0 to disable)
####### DO NOT CHANGE THESE UNLESS YOU KNOW WHAT YOU ARE DOING ########
# These are params from iNeRF and NeRF-Pytorch
# In general they should be the same as how the NeRF model was changed
multires: 10 # log2 of max freq for positional encoding (3D location)
multires_views: 4 # log2 of max freq for positional encoding (2D direction)
i_embed: 0 # set 0 for default positional encoding, -1 for none
netdepth: 8 # layers in network
netwidth: 256 # channels per layer
netdepth_fine: 8 # layers in fine network
netwidth_fine: 256 # channels per layer in fine network
use_viewdirs: True
perturb: 0 # set to 0. for no jitter, 1. for jitter
white_bkgd: False # set to render synthetic data on a white bkgd (always use for dvoxels)
raw_noise_std: 1.0 # std dev of noise added to regularize sigma_a output, 1e0 recommended
lindisp: False # sampling linearly in disparity rather than depth
chunk: 65536 # 1024x64 # number of rays processed in parallel, decrease if running out of memory
netchunk: 65536 # 1024x64 # number of pts sent through network in parallel, decrease if running out of memory
bd_factor: 0.75
kernel_size: 3
lrate: 0.01
dil_iter: 3 # only used with interest_regions sampling
================================================
FILE: cfg/llff_global.yaml
================================================
#### filtering flags ####
run_predicts: False # Use odometry for predict step, otherwise use 0 mean gaussian noise.
use_received_image: False # Update the sensor image.
center_about_true_pose: True # Use gt orientation as the mean pose to generate an initial start pose - if False use origin.
use_weighted_avg: True # Use weighted average to compute position estimate, otherwise use simple average.
no_ndc: False # note that custom data cannot use ndc but LLFF should
#### main update step params ####
factor: 4 # image down-sample factor
course_samples: 64 # number course samples per ray
fine_samples: 64 # number fine samples per ray
photometric_loss: rgb # rgb
sampling_strategy: random # only random is supported right now
batch_size: 32 # number of pixels to use for measurement points
num_particles: 600 # Number of initial particles.
#### dataset and weight loading ####
dataset_type: llff # llff, custom
ckpt_dir: /home/dominic/inerf/ckpts
data_dir: /home/dominic/inerf/data/nerf_llff_data # only needed for LLFF data
#### convergence protection #####
# randomly sample a few particle positions to protect against false convergence
# TODO eventually want to just remove the lowest weight particles instead of random particles
use_convergence_protection: True
number_convergence_particles: 10 # number of particles to distribute
convergence_noise: 0.2 # meters, uniformly sample this far from mean for each axis.
#### inerf comparison ####
run_inerf_compare: True
global_loc_mode: True # Perform global localization with Loc-NeRF.
log_results: False # Save pose estimate for each iteration. Also save a file with gt pose and the initial pose guess if use_logged_start is False.
use_logged_start: True # use saved logs for initial pose
log_prefix: use_annealing # add a prefix to the log file names for convenience to keep track of different runs
log_directory: /home/dominic/mocNeRF_ws/src/locnerf/logs/global_loc # Directory to save npy files.
forward_passes_limit: 78643200 # end optimization after this number of forward passes have run
#### custom data realtime tracking mode params ####
# Note these values are all overwritten/not used if run_inerf_compare is True
# subscriber topics
rgb_topic: /camera/color/image_raw
vio_topic: /vins_estimator/odometry
focal: 635
H: 720
W: 1280
# Near and far bounds of the NeRF scene, should be similar to how the NeRF was trained.
near: 0
far: 8
# set distribution of particles, units are meters and degrees
min_bounds:
px: -0.5
py: -0.3
pz: 0.0
rz: -2.5
ry: -179.0
rx: -2.5
max_bounds:
px: 0.5
py: 0.2
pz: 3.5
rz: 2.5
ry: 179.0
rx: 2.5
# rotation from the body frame of VIO to the NeRF camera frame (x right, y up, z inward). all values in radians
R_bodyVins_camNerf:
rz: 0.0
ry: 0.0
rx: 3.14159256
#### Nerf Navigation comparision ####
# uses user set distribution of particles
run_nerfnav_compare: False
nerf_nav_directory : /home/dominic/Downloads/input_data1
#### particle annealing params ####
use_refining: True # Decrease position noise as we start to converge.
use_particle_reduction: True # Reduce number of particles as they start to converge
min_number_particles: 100 # reduce particles to this amount during particle reduction
# position standard deviation thresholds to activate particle annealing
alpha_refine: 0.08
alpha_super_refine: 0.03
#### motion model noise params ####
# position noise
px_noise: 0.02
py_noise: 0.02
pz_noise: 0.02
# rotation noise
rot_x_noise: 0.01
rot_y_noise: 0.04
rot_z_noise: 0.01
#### visualization params ####
visualize_particles: True # publish particles for rviz
view_debug_image_iteration: 0 # view NeRF rendered image at estimated pose after number of iterations (set to 0 to disable)
####### DO NOT CHANGE THESE UNLESS YOU KNOW WHAT YOU ARE DOING ########
# These are params from iNeRF and NeRF-Pytorch
# In general they should be the same as how the NeRF model was changed
multires: 10 # log2 of max freq for positional encoding (3D location)
multires_views: 4 # log2 of max freq for positional encoding (2D direction)
i_embed: 0 # set 0 for default positional encoding, -1 for none
netdepth: 8 # layers in network
netwidth: 256 # channels per layer
netdepth_fine: 8 # layers in fine network
netwidth_fine: 256 # channels per layer in fine network
use_viewdirs: True
perturb: 0 # set to 0. for no jitter, 1. for jitter
white_bkgd: False # set to render synthetic data on a white bkgd (always use for dvoxels)
raw_noise_std: 1.0 # std dev of noise added to regularize sigma_a output, 1e0 recommended
lindisp: False # sampling linearly in disparity rather than depth
chunk: 65536 # 1024x64 # number of rays processed in parallel, decrease if running out of memory
netchunk: 65536 # 1024x64 # number of pts sent through network in parallel, decrease if running out of memory
bd_factor: 0.75
kernel_size: 3
lrate: 0.01
dil_iter: 3 # only used with interest_regions sampling
================================================
FILE: launch/navigate.launch
================================================
<launch>
<arg name="parameter_file" default="config.yaml" />
<rosparam command="load" file="$(find locnerf)/cfg/$(arg parameter_file)" />
<node pkg="locnerf" type="nav_node.py" name="nav_node" output="screen" />
</launch>
================================================
FILE: package.xml
================================================
<?xml version="1.0"?>
<package>
<name>locnerf</name>
<version>0.1.0</version>
<description>Run particle filter with NeRF and VIO</description>
<maintainer email="drmaggio@mit.edu">Dominic Maggio</maintainer>
<license>MIT</license>
<buildtool_depend>catkin</buildtool_depend>
<build_depend>geometry_msgs</build_depend>
<build_depend>sensor_msgs</build_depend>
<build_depend>std_msgs</build_depend>
<export>
</export>
</package>
================================================
FILE: requirements.txt
================================================
cv_bridge
gtsam
imageio
matplotlib
numpy
rospy
rostopic
scipy
sensor_msgs
opencv_contrib_python
torch
torch_ema
torchvision
tqdm
================================================
FILE: rviz/rviz.rviz
================================================
Panels:
- Class: rviz/Displays
Help Height: 78
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /Status1
- /PoseArray1
- /PoseArray2
- /Image1
- /Odometry1
Splitter Ratio: 0.5
Tree Height: 213
- Class: rviz/Selection
Name: Selection
- Class: rviz/Tool Properties
Expanded:
- /2D Pose Estimate1
- /2D Nav Goal1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
- Class: rviz/Time
Experimental: false
Name: Time
SyncMode: 0
SyncSource: ""
Preferences:
PromptSaveOnExit: true
Toolbars:
toolButtonStyle: 2
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Alpha: 1
Arrow Length: 0.30000001192092896
Axes Length: 0.30000001192092896
Axes Radius: 0.009999999776482582
Class: rviz/PoseArray
Color: 255; 25; 0
Enabled: true
Head Length: 0.07000000029802322
Head Radius: 0.029999999329447746
Name: PoseArray
Queue Size: 10
Shaft Length: 0.23000000417232513
Shaft Radius: 0.009999999776482582
Shape: Arrow (Flat)
Topic: /particles
Unreliable: false
Value: true
- Alpha: 1
Arrow Length: 0.30000001192092896
Axes Length: 0.30000001192092896
Axes Radius: 0.009999999776482582
Class: rviz/PoseArray
Color: 0; 128; 0
Enabled: true
Head Length: 0.07000000029802322
Head Radius: 0.029999999329447746
Name: PoseArray
Queue Size: 10
Shaft Length: 0.23000000417232513
Shaft Radius: 0.009999999776482582
Shape: Arrow (Flat)
Topic: /gt_pose
Unreliable: false
Value: true
- Alpha: 1
Class: rviz/Axes
Enabled: true
Length: 1
Name: Axes
Radius: 0.10000000149011612
Reference Frame: <Fixed Frame>
Show Trail: false
Value: true
- Class: rviz/Image
Enabled: true
Image Topic: /camera_d455/color/image_raw
Max Value: 1
Median window: 5
Min Value: 0
Name: Image
Normalize Range: true
Queue Size: 2
Transport Hint: raw
Unreliable: false
Value: true
- Angle Tolerance: 0.10000000149011612
Class: rviz/Odometry
Covariance:
Orientation:
Alpha: 0.5
Color: 255; 255; 127
Color Style: Unique
Frame: Local
Offset: 1
Scale: 1
Value: true
Position:
Alpha: 0.30000001192092896
Color: 204; 51; 204
Scale: 1
Value: true
Value: true
Enabled: true
Keep: 1000
Name: Odometry
Position Tolerance: 0.10000000149011612
Queue Size: 1000
Shape:
Alpha: 1
Axes Length: 1
Axes Radius: 0.10000000149011612
Color: 255; 25; 0
Head Length: 0.30000001192092896
Head Radius: 0.10000000149011612
Shaft Length: 1
Shaft Radius: 0.05000000074505806
Value: Arrow
Topic: /vins_estimator/odometry
Unreliable: false
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Default Light: true
Fixed Frame: world
Frame Rate: 30
Name: root
Tools:
- Class: rviz/Interact
Hide Inactive Objects: true
- Class: rviz/MoveCamera
- Class: rviz/Select
- Class: rviz/FocusCamera
- Class: rviz/Measure
- Class: rviz/SetInitialPose
Theta std deviation: 0.2617993950843811
Topic: /initialpose
X std deviation: 0.5
Y std deviation: 0.5
- Class: rviz/SetGoal
Topic: /move_base_simple/goal
- Class: rviz/PublishPoint
Single click: true
Topic: /clicked_point
Value: true
Views:
Current:
Class: rviz/Orbit
Distance: 2.0610387325286865
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Field of View: 0.7853981852531433
Focal Point:
X: 0
Y: 0
Z: 0
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.19980168342590332
Target Frame: <Fixed Frame>
Yaw: 5.5282206535339355
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 973
Hide Left Dock: false
Hide Right Dock: true
Image:
collapsed: false
QMainWindow State: 000000ff00000000fd0000000400000000000001e40000032ffc0200000009fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d00000160000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb0000000a0049006d00610067006501000001a3000001c90000001600ffffff000000010000010f000002b0fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073000000003d000002b0000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000006b70000003efc0100000002fb0000000800540069006d00650100000000000006b7000002eb00fffffffb0000000800540069006d00650100000000000004500000000000000000000004cd0000032f00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Time:
collapsed: false
Tool Properties:
collapsed: false
Views:
collapsed: true
Width: 1719
X: 3640
Y: 142
================================================
FILE: setup.py
================================================
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
args = generate_distutils_setup(
packages=['locnerf'],
package_dir={'':'src'}
)
setup(**args)
================================================
FILE: src/full_filter.py
================================================
from matplotlib.markers import MarkerStyle
import torch
import numpy as np
import cv2
import matplotlib.pyplot as plt
import time
from utils import show_img, find_POI, img2mse, load_llff_data, get_pose
from full_nerf_helpers import load_nerf
from render_helpers import render, to8b, get_rays
from particle_filter import ParticleFilter
from scipy.spatial.transform import Rotation as R
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# part of this script is adapted from iNeRF https://github.com/salykovaa/inerf
# and NeRF-Pytorch https://github.com/yenchenlin/nerf-pytorch/blob/master/load_llff.py
class NeRF:
def __init__(self, nerf_params):
# Parameters
self.output_dir = './output/'
self.data_dir = nerf_params['data_dir']
self.model_name = nerf_params['model_name']
self.obs_img_num = nerf_params['obs_img_num']
self.batch_size = nerf_params['batch_size']
self.factor = nerf_params['factor']
self.near = nerf_params['near']
self.far = nerf_params['far']
self.spherify = False
self.kernel_size = nerf_params['kernel_size']
self.lrate = nerf_params['lrate']
self.dataset_type = nerf_params['dataset_type']
self.sampling_strategy = nerf_params['sampling_strategy']
self.delta_phi, self.delta_theta, self.delta_psi, self.delta_x, self.delta_y, self.delta_z = [0,0,0,0,0,0]
self.no_ndc = nerf_params['no_ndc']
self.dil_iter = nerf_params['dil_iter']
self.chunk = nerf_params['chunk'] # number of rays processed in parallel, decrease if running out of memory
self.bd_factor = nerf_params['bd_factor']
print("dataset type:", self.dataset_type)
print("no ndc:", self.no_ndc)
if self.dataset_type == 'custom':
print("self.factor", self.factor)
self.focal = nerf_params['focal'] / self.factor
self.H = nerf_params['H'] / self.factor
self.W = nerf_params['W'] / self.factor
# we don't actually use obs_img_pose when we run live images. this prevents attribute errors later in the code
self.obs_img_pose = None
self.H, self.W = int(self.H), int(self.W)
else:
self.obs_img, self.hwf, self.start_pose, self.obs_img_pose, self.bds = load_llff_data(self.data_dir, self.model_name, self.obs_img_num, self.delta_phi,
self.delta_theta, self.delta_psi, self.delta_x, self.delta_y, self.delta_z, factor=self.factor, recenter=True, bd_factor=self.bd_factor, spherify=self.spherify)
self.H, self.W, self.focal = self.hwf
if self.no_ndc:
self.near = np.ndarray.min(self.bds) * .9
self.far = np.ndarray.max(self.bds) * 1.
print(self.near, self.far)
else:
self.near = 0.
self.far = 1.
self.H, self.W = int(self.H), int(self.W)
self.obs_img = (np.array(self.obs_img) / 255.).astype(np.float32)
self.obs_img_noised = self.obs_img
# create meshgrid from the observed image
self.coords = np.asarray(np.stack(np.meshgrid(np.linspace(0, self.W - 1, self.W), np.linspace(0, self.H - 1, self.H)), -1),
dtype=int)
self.coords = self.coords.reshape(self.H * self.W, 2)
# print("height, width, focal:", self.H, self.W, self.focal)
# Load NeRF Model
self.render_kwargs = load_nerf(nerf_params, device)
bds_dict = {
'near': self.near,
'far': self.far,
}
self.render_kwargs.update(bds_dict)
def get_poi_interest_regions(self, show_img=False, sampling_type = None):
# TODO see if other image normalization routines are better
self.obs_img_noised = (np.array(self.obs_img) / 255.0).astype(np.float32)
if show_img:
plt.imshow(self.obs_img_noised)
plt.show()
self.coords = np.asarray(np.stack(np.meshgrid(np.linspace(0, self.W - 1, self.W), np.linspace(0, self.H - 1, self.H)), -1),
dtype=int)
if sampling_type == 'random':
self.coords = self.coords.reshape(self.H * self.W, 2)
def get_loss(self, particles, batch, photometric_loss='rgb'):
target_s = self.obs_img_noised[batch[:, 1], batch[:, 0]] # TODO check ordering here
target_s = torch.Tensor(target_s).to(device)
start_time = time.time()
num_pixels = len(particles) * len(batch)
all_rays_o = np.zeros((num_pixels,3))
all_rays_d = np.zeros((num_pixels,3))
for i, particle in enumerate(particles):
pose = torch.Tensor(particle).to(device)
rays_o, rays_d = get_rays(self.H, self.W, self.focal, pose) # TODO this line can be stored as a param
rays_o = rays_o[batch[:, 1], batch[:, 0]]
rays_d = rays_d[batch[:, 1], batch[:, 0]]
all_rays_o[i*len(batch): i*len(batch) + len(batch),:] = rays_o.cpu().detach().numpy()
all_rays_d[i*len(batch): i*len(batch) + len(batch),:] = rays_d.cpu().detach().numpy()
all_rays_o = torch.Tensor(all_rays_o).to(device)
all_rays_d = torch.Tensor(all_rays_d).to(device)
batch_rays = torch.stack([all_rays_o, all_rays_d], 0)
rgb_all, disp, acc, extras = render(self.H, self.W, self.focal, chunk=self.chunk, rays=batch_rays,
retraw=True,
**self.render_kwargs)
nerf_time = time.time() - start_time
# print(rgb_all)
# print()
# print(target_s)
losses = []
for i in range(len(particles)):
rgb = rgb_all[i*len(batch): i*len(batch) + len(batch)]
if photometric_loss == 'rgb':
loss = img2mse(rgb, target_s)
else:
# TODO throw an error
print("DID NOT ENTER A VALID LOSS METRIC")
losses.append(loss.item())
return losses, nerf_time
def visualize_nerf_image(self, nerf_pose):
pose_dummy = torch.from_numpy(nerf_pose).cuda()
with torch.no_grad():
print(nerf_pose)
rgb, disp, acc, _ = render(self.H, self.W, self.focal, chunk=self.chunk, c2w=pose_dummy[:3, :4], **self.render_kwargs)
rgb = rgb.cpu().detach().numpy()
rgb8 = to8b(rgb)
ref = to8b(self.obs_img)
plt.imshow(rgb8)
plt.show()
================================================
FILE: src/full_nerf_helpers.py
================================================
import torch
import os
torch.autograd.set_detect_anomaly(True)
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# most of this script is adapted from iNeRF https://github.com/salykovaa/inerf
# and NeRF-Pytorch https://github.com/yenchenlin/nerf-pytorch/blob/master/load_llff.py
def load_nerf(nerf_params, device):
"""Instantiate NeRF's MLP model.
"""
embed_fn, input_ch = get_embedder(nerf_params['multires'], nerf_params['i_embed'])
embeddirs_fn, input_ch_views = get_embedder(nerf_params['multires_views'], nerf_params['i_embed'])
output_ch = 4
skips = [4]
model = NeRF(D=nerf_params['netdepth'], W=nerf_params['netwidth'],
input_ch=input_ch, output_ch=output_ch, skips=skips,
input_ch_views=input_ch_views, use_viewdirs=nerf_params['use_viewdirs']).to(device)
model_fine = NeRF(D=nerf_params['netdepth_fine'], W=nerf_params['netwidth_fine'],
input_ch=input_ch, output_ch=output_ch, skips=skips,
input_ch_views=input_ch_views, use_viewdirs=nerf_params['use_viewdirs']).to(device)
network_query_fn = lambda inputs, viewdirs, network_fn: run_network(inputs, viewdirs, network_fn,
embed_fn=embed_fn,
embeddirs_fn=embeddirs_fn,
netchunk=nerf_params['netchunk'])
# Load checkpoint, order tar files and pick the most trained if multiple exist.
ckpt_dir = nerf_params['ckpt_dir']
ckpt_name = ""
ckpts = [os.path.join(ckpt_dir, ckpt_name, f) for f in sorted(os.listdir(os.path.join(ckpt_dir, ckpt_name))) if 'tar' in f]
ckpt_path = ckpts[-1]
print('Found ckpts', ckpts)
print('Reloading from', ckpt_path)
ckpt = torch.load(ckpt_path)
# Load model
model.load_state_dict(ckpt['network_fn_state_dict'], strict=False)
model_fine.load_state_dict(ckpt['network_fine_state_dict'], strict=False)
render_kwargs = {
'network_query_fn': network_query_fn,
'perturb': nerf_params['perturb'],
'N_importance': nerf_params['fine_samples'],
'network_fine': model_fine,
'N_samples': nerf_params['course_samples'],
'network_fn': model,
'use_viewdirs': nerf_params['use_viewdirs'],
'white_bkgd': nerf_params['white_bkgd'],
'raw_noise_std': nerf_params['raw_noise_std']
}
# NDC only good for LLFF-style forward facing data
if nerf_params['dataset_type'] != 'llff' or nerf_params['no_ndc']:
print('Not ndc!')
render_kwargs['ndc'] = False
render_kwargs['lindisp'] = nerf_params['lindisp']
if nerf_params['dataset_type'] != 'llff' and not nerf_params['no_ndc']:
print("WARNING YOU CANNOT USE NDC WITH NON LLFF DATA - NDC WILL NOT BE USED")
# Disable updating of the weights
for param in model.parameters():
param.requires_grad = False
for param in model_fine.parameters():
param.requires_grad = False
return render_kwargs
# Positional encoding (section 5.1)
class Embedder:
def __init__(self, **kwargs):
self.kwargs = kwargs
self.create_embedding_fn()
def create_embedding_fn(self):
embed_fns = []
d = self.kwargs['input_dims']
out_dim = 0
if self.kwargs['include_input']:
embed_fns.append(lambda x: x)
out_dim += d
max_freq = self.kwargs['max_freq_log2']
N_freqs = self.kwargs['num_freqs']
if self.kwargs['log_sampling']:
freq_bands = 2. ** torch.linspace(0., max_freq, steps=N_freqs)
else:
freq_bands = torch.linspace(2. ** 0., 2. ** max_freq, steps=N_freqs)
for freq in freq_bands:
for p_fn in self.kwargs['periodic_fns']:
embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
out_dim += d
self.embed_fns = embed_fns
self.out_dim = out_dim
def embed(self, inputs):
return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
def get_embedder(multires, i=0):
if i == -1:
return nn.Identity(), 3
embed_kwargs = {
'include_input': True,
'input_dims': 3,
'max_freq_log2': multires - 1,
'num_freqs': multires,
'log_sampling': True,
'periodic_fns': [torch.sin, torch.cos],
}
embedder_obj = Embedder(**embed_kwargs)
embed = lambda x, eo=embedder_obj: eo.embed(x)
return embed, embedder_obj.out_dim
# Model
class NeRF(nn.Module):
    """Standard NeRF MLP (Mildenhall et al., ECCV 2020).

    D fully-connected layers of width W process the embedded 3-D position,
    with a skip connection re-injecting the position embedding at the layer
    indices listed in `skips`. If use_viewdirs is set, density (alpha) is
    predicted from position alone and RGB from a feature vector concatenated
    with the embedded view direction; otherwise a single `output_ch`-wide
    head is used.
    """
    def __init__(self, D=8, W=256, input_ch=3, input_ch_views=3, output_ch=4, skips=[4], use_viewdirs=False):
        """
        D: number of position layers. W: layer width.
        input_ch / input_ch_views: embedded position / view-direction widths.
        output_ch: output width when use_viewdirs is False.
        skips: layer indices after which the position embedding is re-concatenated.
        """
        super(NeRF, self).__init__()
        self.D = D
        self.W = W
        self.input_ch = input_ch
        self.input_ch_views = input_ch_views
        self.skips = skips
        self.use_viewdirs = use_viewdirs
        # Layer i+1 takes W + input_ch inputs when i is a skip index, matching
        # the concatenation performed after layer i in forward().
        self.pts_linears = nn.ModuleList(
            [nn.Linear(input_ch, W)] + [nn.Linear(W, W) if i not in self.skips else nn.Linear(W + input_ch, W) for i in
                                        range(D - 1)])
        ### Implementation according to the official code release (https://github.com/bmild/nerf/blob/master/run_nerf_helpers.py#L104-L105)
        self.views_linears = nn.ModuleList([nn.Linear(input_ch_views + W, W // 2)])
        ### Implementation according to the paper
        # self.views_linears = nn.ModuleList(
        #     [nn.Linear(input_ch_views + W, W//2)] + [nn.Linear(W//2, W//2) for i in range(D//2)])
        if use_viewdirs:
            self.feature_linear = nn.Linear(W, W)
            self.alpha_linear = nn.Linear(W, 1)
            self.rgb_linear = nn.Linear(W // 2, 3)
        else:
            self.output_linear = nn.Linear(W, output_ch)

    def forward(self, x):
        """x is [..., input_ch + input_ch_views]; returns [..., 4] (rgb, alpha)
        when use_viewdirs, else [..., output_ch]."""
        input_pts, input_views = torch.split(x, [self.input_ch, self.input_ch_views], dim=-1)
        h = input_pts
        for i, l in enumerate(self.pts_linears):
            h = self.pts_linears[i](h)
            h = F.relu(h)
            # Skip connection: re-inject the raw position embedding after layer i.
            if i in self.skips:
                h = torch.cat([input_pts, h], -1)

        if self.use_viewdirs:
            # Density depends on position only; RGB also sees the view direction.
            alpha = self.alpha_linear(h)
            feature = self.feature_linear(h)
            h = torch.cat([feature, input_views], -1)

            for i, l in enumerate(self.views_linears):
                h = self.views_linears[i](h)
                h = F.relu(h)

            rgb = self.rgb_linear(h)
            outputs = torch.cat([rgb, alpha], -1)
        else:
            outputs = self.output_linear(h)

        return outputs

    def load_weights_from_keras(self, weights):
        """Copy weights from the official TF/Keras checkpoint layout.

        `weights` is a flat list alternating (kernel, bias) per layer in the
        order: pts_linears (2*D entries), feature_linear, views_linears,
        rgb_linear, alpha_linear. Kernels are transposed to PyTorch's
        (out, in) convention.
        """
        assert self.use_viewdirs, "Not implemented if use_viewdirs=False"
        print("here")  # NOTE(review): leftover debug print
        # Load pts_linears
        for i in range(self.D):
            idx_pts_linears = 2 * i
            self.pts_linears[i].weight.data = torch.from_numpy(np.transpose(weights[idx_pts_linears]))
            self.pts_linears[i].bias.data = torch.from_numpy(np.transpose(weights[idx_pts_linears + 1]))

        # Load feature_linear
        idx_feature_linear = 2 * self.D
        self.feature_linear.weight.data = torch.from_numpy(np.transpose(weights[idx_feature_linear]))
        self.feature_linear.bias.data = torch.from_numpy(np.transpose(weights[idx_feature_linear + 1]))

        # Load views_linears
        idx_views_linears = 2 * self.D + 2
        self.views_linears[0].weight.data = torch.from_numpy(np.transpose(weights[idx_views_linears]))
        self.views_linears[0].bias.data = torch.from_numpy(np.transpose(weights[idx_views_linears + 1]))

        # Load rgb_linear
        idx_rbg_linear = 2 * self.D + 4
        self.rgb_linear.weight.data = torch.from_numpy(np.transpose(weights[idx_rbg_linear]))
        self.rgb_linear.bias.data = torch.from_numpy(np.transpose(weights[idx_rbg_linear + 1]))

        # Load alpha_linear
        idx_alpha_linear = 2 * self.D + 6
        self.alpha_linear.weight.data = torch.from_numpy(np.transpose(weights[idx_alpha_linear]))
        self.alpha_linear.bias.data = torch.from_numpy(np.transpose(weights[idx_alpha_linear + 1]))
def batchify(fn, chunk):
    """Constructs a version of 'fn' that applies to smaller batches.

    With chunk=None the function is returned unchanged. Otherwise the wrapper
    slices its input into `chunk`-row pieces along dim 0, applies `fn` to each
    piece, and concatenates the results along dim 0.
    """
    if chunk is None:
        return fn

    def chunked(inputs):
        pieces = [fn(inputs[start:start + chunk])
                  for start in range(0, inputs.shape[0], chunk)]
        return torch.cat(pieces, 0)

    return chunked
def run_network(inputs, viewdirs, fn, embed_fn, embeddirs_fn, netchunk=1024*64):
    """Prepares inputs and applies network 'fn'.

    Flattens `inputs` ([..., C]) to 2-D, positionally encodes it with
    `embed_fn`, optionally appends the encoded view directions (each direction
    is broadcast across its ray's samples), runs `fn` in `netchunk`-sized
    slices via batchify, and restores the original leading shape.
    """
    flat_pts = torch.reshape(inputs, [-1, inputs.shape[-1]])
    encoded = embed_fn(flat_pts)

    if viewdirs is not None:
        # One direction per ray, expanded to match every sample on that ray.
        dirs = viewdirs[:, None].expand(inputs.shape)
        flat_dirs = torch.reshape(dirs, [-1, dirs.shape[-1]])
        encoded = torch.cat([encoded, embeddirs_fn(flat_dirs)], -1)

    raw = batchify(fn, netchunk)(encoded)
    return torch.reshape(raw, list(inputs.shape[:-1]) + [raw.shape[-1]])
================================================
FILE: src/nav_node.py
================================================
#!/usr/bin/env python
from turtle import pos
import rospy
import numpy as np
import gtsam
import cv2
import torch
import time
import glob
import matplotlib.pyplot as plt
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseArray, Pose
from scipy.spatial.transform import Rotation as R
from copy import copy
import locnerf
from full_filter import NeRF
from particle_filter import ParticleFilter
from utils import get_pose
from navigator_base import NavigatorBase
torch.set_default_tensor_type('torch.cuda.FloatTensor')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Navigator(NavigatorBase):
    """Particle-filter NeRF localization node (Loc-NeRF).

    Weights particles by photometric loss between the observed image and the
    NeRF rendered at each particle pose, then resamples. Prediction comes
    either from VIO odometry deltas (run_predicts) or from noise-only
    diffusion. Runs offline against dataset images (run_inerf_compare) or
    live on ROS topics.
    """
    def __init__(self, img_num=None, dataset_name=None):
        # Base class handles loading most params.
        NavigatorBase.__init__(self, img_num=img_num, dataset_name=dataset_name)

        # Set initial distribution of particles.
        self.get_initial_distribution()
        self.br = CvBridge()

        # Set up publishers.
        self.particle_pub = rospy.Publisher("/particles", PoseArray, queue_size = 10)
        self.pose_pub = rospy.Publisher("/estimated_pose", Odometry, queue_size = 10)
        self.gt_pub = rospy.Publisher("/gt_pose", PoseArray, queue_size = 10)

        # Set up subscribers.
        # We don't need callbacks to compare against inerf.
        if not self.run_inerf_compare:
            self.image_sub = rospy.Subscriber(self.rgb_topic,Image,self.rgb_callback, queue_size=1, buff_size=2**24)
            if self.run_predicts:
                self.vio_sub = rospy.Subscriber(self.pose_topic, Odometry, self.vio_callback, queue_size = 10)

        # Show initial distribution of particles
        if self.plot_particles:
            self.visualize()

        if self.log_results:
            # If using a provided start we already have ground truth, so don't log redundant gt.
            if not self.use_logged_start:
                with open(self.log_directory + "/" + "gt_" + self.model_name + "_" + str(self.obs_img_num) + "_" + "poses.npy", 'wb') as f:
                    np.save(f, self.gt_pose)

        # Add initial pose estimate before first update step is run.
        if self.use_weighted_avg:
            position_est = self.filter.compute_weighted_position_average()
        else:
            position_est = self.filter.compute_simple_position_average()
        rot_est = self.filter.compute_simple_rotation_average()
        pose_est = gtsam.Pose3(rot_est, position_est).matrix()
        self.all_pose_est.append(pose_est)

    def get_initial_distribution(self):
        """Create the initial particle set and wrap it in a ParticleFilter.

        iNeRF-compare mode perturbs a start pose (freshly sampled or reloaded
        from a log) and scatters particles around it; global-loc mode widens
        translation and sweeps yaw over ~360 degrees. Otherwise particles are
        drawn uniformly from the user-supplied min/max bounds.
        """
        # NOTE for now assuming everything stays in NeRF coordinates (x right, y up, z inward)
        if self.run_inerf_compare:
            # for non-global loc mode, get random pose based on iNeRF evaluation method from their paper
            # sample random axis from unit sphere and then rotate by a random amount between [-40, 40] degrees
            # translate along each axis by a random amount between [-10, 10] cm
            rot_rand = 40.0
            if self.global_loc_mode:
                trans_rand = 1.0
            else:
                trans_rand = 0.1

            # get random axis and angle for rotation
            x = np.random.rand()
            y = np.random.rand()
            z = np.random.rand()
            axis = np.array([x,y,z])
            axis = axis / np.linalg.norm(axis)
            angle = np.pi * np.random.uniform(low=-rot_rand, high=rot_rand) / 180.0
            # yaw/pitch/roll of the sampled rotation
            euler = (gtsam.Rot3.AxisAngle(axis, angle)).ypr()

            # get random translation offset
            t_x = np.random.uniform(low=-trans_rand, high=trans_rand)
            t_y = np.random.uniform(low=-trans_rand, high=trans_rand)
            t_z = np.random.uniform(low=-trans_rand, high=trans_rand)

            # use initial random pose from previously saved log
            if self.use_logged_start:
                log_file = self.log_directory + "/" + "initial_pose_" + self.model_name + "_" + str(self.obs_img_num) + "_" + "poses.npy"
                start = np.load(log_file)
                print(start)
                # logged layout: [yaw, pitch, roll, t_x, t_y, t_z] (matches save below)
                euler[0], euler[1], euler[2], t_x, t_y, t_z = start

            # log initial random pose
            elif self.log_results:
                with open(self.log_directory + "/" + "initial_pose_" + self.model_name + "_" + str(self.obs_img_num) + "_" + "poses.npy", 'wb') as f:
                    np.save(f, np.array([euler[0], euler[1], euler[2], t_x, t_y, t_z]))

            if self.global_loc_mode:
                # 360 degree rotation distribution about yaw
                self.initial_particles_noise = np.random.uniform(np.array([-trans_rand, -trans_rand, -trans_rand, 0, -179, 0]), np.array([trans_rand, trans_rand, trans_rand, 0, 179, 0]), size = (self.num_particles, 6))
            else:
                self.initial_particles_noise = np.random.uniform(np.array([-trans_rand, -trans_rand, -trans_rand, 0, 0, 0]), np.array([trans_rand, trans_rand, trans_rand, 0, 0, 0]), size = (self.num_particles, 6))

            # center translation at randomly sampled position
            self.initial_particles_noise[:, 0] += t_x
            self.initial_particles_noise[:, 1] += t_y
            self.initial_particles_noise[:, 2] += t_z

            if not self.global_loc_mode:
                for i in range(self.initial_particles_noise.shape[0]):
                    # rotate random 3 DOF rotation about initial random rotation for each particle
                    n1 = np.pi * np.random.uniform(low=-rot_rand, high=rot_rand) / 180.0
                    n2 = np.pi * np.random.uniform(low=-rot_rand, high=rot_rand) / 180.0
                    n3 = np.pi * np.random.uniform(low=-rot_rand, high=rot_rand) / 180.0
                    euler_particle = gtsam.Rot3.AxisAngle(axis, angle).retract(np.array([n1, n2, n3])).ypr()
                    # add rotation noise for initial particle distribution (stored in degrees)
                    self.initial_particles_noise[i,3] = euler_particle[0] * 180.0 / np.pi
                    self.initial_particles_noise[i,4] = euler_particle[1] * 180.0 / np.pi
                    self.initial_particles_noise[i,5] = euler_particle[2] * 180.0 / np.pi
        else:
            # get distribution of particles from user
            self.initial_particles_noise = np.random.uniform(np.array(
                [self.min_bounds['px'], self.min_bounds['py'], self.min_bounds['pz'], self.min_bounds['rz'], self.min_bounds['ry'], self.min_bounds['rx']]),
                np.array([self.max_bounds['px'], self.max_bounds['py'], self.max_bounds['pz'], self.max_bounds['rz'], self.max_bounds['ry'], self.max_bounds['rx']]),
                size = (self.num_particles, 6))

        self.initial_particles = self.set_initial_particles()
        self.filter = ParticleFilter(self.initial_particles)

    def vio_callback(self, msg):
        """Propagate particles with the relative pose between consecutive VIO Odometry messages."""
        # extract rotation and position from msg
        quat = msg.pose.pose.orientation
        position = msg.pose.pose.position

        # rotate vins to be in nerf frame
        rx = self.R_bodyVins_camNerf['rx']
        ry = self.R_bodyVins_camNerf['ry']
        rz = self.R_bodyVins_camNerf['rz']
        T_bodyVins_camNerf = gtsam.Pose3(gtsam.Rot3.Ypr(rz, ry, rx), gtsam.Point3(0,0,0))
        T_wVins_camVins = gtsam.Pose3(gtsam.Rot3(quat.w, quat.x, quat.y, quat.z), gtsam.Point3(position.x, position.y, position.z))
        T_wVins_camNeRF = gtsam.Pose3(T_wVins_camVins.matrix() @ T_bodyVins_camNerf.matrix())

        if self.previous_vio_pose is not None:
            # relative motion of the NeRF camera frame between times t and t+1
            T_camNerft_camNerftp1 = gtsam.Pose3(self.previous_vio_pose.inverse().matrix() @ T_wVins_camNeRF.matrix())
            self.run_predict(T_camNerft_camNerftp1)

        # log pose for next transform computation
        self.previous_vio_pose = T_wVins_camNeRF

        # publish particles for rviz
        if self.plot_particles:
            self.visualize()

    def rgb_callback(self, msg):
        # Cache the newest image only; the main loop consumes and clears it.
        self.img_msg = msg

    def rgb_run(self, msg, get_rays_fn=None, render_full_image=False):
        """Run one particle-filter update step on image `msg`.

        Weights every particle by 1/photometric-loss at sampled pixels,
        resamples, optionally refines the noise scale / particle count, then
        applies noise-only prediction when no VIO is used.
        Returns the 4x4 pose-estimate matrix (for logging).
        NOTE(review): get_rays_fn and render_full_image are unused in this
        body — presumably kept for interface parity; confirm before removal.
        """
        print("processing image")
        start_time = time.time()
        self.rgb_input_count += 1

        # make copies to prevent mutations
        particles_position_before_update = np.copy(self.filter.particles['position'])
        particles_rotation_before_update = [gtsam.Rot3(i.matrix()) for i in self.filter.particles['rotation']]

        if self.use_convergence_protection:
            # Scatter a few particles to guard against premature convergence.
            for i in range(self.number_convergence_particles):
                t_x = np.random.uniform(low=-self.convergence_noise, high=self.convergence_noise)
                t_y = np.random.uniform(low=-self.convergence_noise, high=self.convergence_noise)
                t_z = np.random.uniform(low=-self.convergence_noise, high=self.convergence_noise)
                # TODO this is not thread safe. have two lines because we need to both update
                # particles to check the loss and the actual locations of the particles
                self.filter.particles["position"][i] = self.filter.particles["position"][i] + np.array([t_x, t_y, t_z])
                particles_position_before_update[i] = particles_position_before_update[i] + np.array([t_x, t_y, t_z])

        if self.use_received_image:
            img = self.br.imgmsg_to_cv2(msg)
            # resize input image so it matches the scale that NeRF expects
            img = cv2.resize(self.br.imgmsg_to_cv2(msg), (int(self.nerf.W), int(self.nerf.H)))
            # img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            self.nerf.obs_img = img

        show_true = self.view_debug_image_iteration != 0 and self.num_updates == self.view_debug_image_iteration-1
        self.nerf.get_poi_interest_regions(show_true, self.sampling_strategy)
        # plt.imshow(self.nerf.obs_img)
        # plt.show()

        total_nerf_time = 0

        if self.sampling_strategy == 'random':
            # Random subset of candidate pixel coordinates for the loss.
            rand_inds = np.random.choice(self.nerf.coords.shape[0], size=self.nerf.batch_size, replace=False)
            batch = self.nerf.coords[rand_inds]

        # Build a 4x4 pose matrix per particle for batched loss evaluation.
        loss_poses = []
        for index, particle in enumerate(particles_position_before_update):
            loss_pose = np.zeros((4,4))
            rot = particles_rotation_before_update[index]
            loss_pose[0:3, 0:3] = rot.matrix()
            loss_pose[0:3,3] = particle[0:3]
            loss_pose[3,3] = 1.0
            loss_poses.append(loss_pose)
        losses, nerf_time = self.nerf.get_loss(loss_poses, batch, self.photometric_loss)

        # Lower photometric loss -> higher particle weight.
        for index, particle in enumerate(particles_position_before_update):
            self.filter.weights[index] = 1/losses[index]
        total_nerf_time += nerf_time

        self.filter.update()
        self.num_updates += 1
        print("UPDATE STEP NUMBER", self.num_updates, "RAN")
        print("number particles:", self.num_particles)

        if self.use_refining: # TODO make it where you can reduce number of particles without using refining
            self.check_refine_gate()

        if self.use_weighted_avg:
            avg_pose = self.filter.compute_weighted_position_average()
        else:
            avg_pose = self.filter.compute_simple_position_average()
        avg_rot = self.filter.compute_simple_rotation_average()
        self.nerf_pose = gtsam.Pose3(avg_rot, gtsam.Point3(avg_pose[0], avg_pose[1], avg_pose[2])).matrix()

        if self.plot_particles:
            self.visualize()

        # TODO add ability to render several frames
        if self.view_debug_image_iteration != 0 and (self.num_updates == self.view_debug_image_iteration):
            self.nerf.visualize_nerf_image(self.nerf_pose)

        if not self.use_received_image:
            # Dataset mode: we have ground truth, so report the position error.
            if self.use_weighted_avg:
                print("average position of all particles: ", self.filter.compute_weighted_position_average())
                print("position error: ", np.linalg.norm(self.gt_pose[0:3,3] - self.filter.compute_weighted_position_average()))
            else:
                print("average position of all particles: ", self.filter.compute_simple_position_average())
                print("position error: ", np.linalg.norm(self.gt_pose[0:3,3] - self.filter.compute_simple_position_average()))

        if self.use_weighted_avg:
            position_est = self.filter.compute_weighted_position_average()
        else:
            position_est = self.filter.compute_simple_position_average()
        rot_est = self.filter.compute_simple_rotation_average()
        pose_est = gtsam.Pose3(rot_est, position_est).matrix()
        if self.log_results:
            self.all_pose_est.append(pose_est)

        if not self.run_inerf_compare:
            # Live mode: stamp the estimate with the image's timestamp.
            img_timestamp = msg.header.stamp
            self.publish_pose_est(pose_est, img_timestamp)
        else:
            self.publish_pose_est(pose_est)

        update_time = time.time() - start_time
        print("forward passes took:", total_nerf_time, "out of total", update_time, "for update step")

        if not self.run_predicts:
            self.filter.predict_no_motion(self.px_noise, self.py_noise, self.pz_noise, self.rot_x_noise, self.rot_y_noise, self.rot_z_noise) # used if you want to localize a static image

        # return is just for logging
        return pose_est

    def check_if_position_error_good(self, return_error = False):
        """
        check if position error is less than 5cm, or return the error if return_error is True
        """
        acceptable_error = 0.05
        if self.use_weighted_avg:
            error = np.linalg.norm(self.gt_pose[0:3,3] - self.filter.compute_weighted_position_average())
            if return_error:
                return error
            return error < acceptable_error
        else:
            error = np.linalg.norm(self.gt_pose[0:3,3] - self.filter.compute_simple_position_average())
            if return_error:
                return error
            return error < acceptable_error

    def check_if_rotation_error_good(self, return_error = False):
        """
        check if rotation error is less than 5 degrees, or return the error if return_error is True
        """
        acceptable_error = 5.0
        average_rot_t = (self.filter.compute_simple_rotation_average()).transpose()
        # check rot in bounds by getting angle using https://math.stackexchange.com/questions/2113634/comparing-two-rotation-matrices
        # NOTE(review): arccos can return NaN if numerical error pushes
        # (trace-1)/2 slightly outside [-1, 1] — consider clipping.
        r_ab = average_rot_t @ (self.gt_pose[0:3,0:3])
        rot_error = np.rad2deg(np.arccos((np.trace(r_ab) - 1) / 2))
        print("rotation error: ", rot_error)
        if return_error:
            return rot_error
        return abs(rot_error) < acceptable_error

    def run_predict(self, delta_pose):
        """Apply the VIO delta pose plus configured noise to all particles."""
        self.filter.predict_with_delta_pose(delta_pose, self.px_noise, self.py_noise, self.pz_noise, self.rot_x_noise, self.rot_y_noise, self.rot_z_noise)
        if self.plot_particles:
            self.visualize()

    def set_initial_particles(self):
        """Convert the 6-DOF noise samples into {'position','rotation'} particle arrays."""
        initial_positions = np.zeros((self.num_particles, 3))
        rots = []
        for index, particle in enumerate(self.initial_particles_noise):
            # Each row is [x, y, z, phi, theta, psi] (angles in degrees).
            x = particle[0]
            y = particle[1]
            z = particle[2]
            phi = particle[3]
            theta = particle[4]
            psi = particle[5]
            particle_pose = get_pose(phi, theta, psi, x, y, z, self.nerf.obs_img_pose, self.center_about_true_pose)
            # set positions
            initial_positions[index,:] = [particle_pose[0,3], particle_pose[1,3], particle_pose[2,3]]
            # set orientations
            rots.append(gtsam.Rot3(particle_pose[0:3,0:3]))
        # print(initial_particles)
        return {'position':initial_positions, 'rotation':np.array(rots)}

    def set_noise(self, scale):
        """Reload prediction-noise params from ROS, divided by `scale` (refine gate)."""
        self.px_noise = rospy.get_param('px_noise') / scale
        self.py_noise = rospy.get_param('py_noise') / scale
        self.pz_noise = rospy.get_param('pz_noise') / scale
        self.rot_x_noise = rospy.get_param('rot_x_noise') / scale
        self.rot_y_noise = rospy.get_param('rot_y_noise') / scale
        self.rot_z_noise = rospy.get_param('rot_z_noise') / scale

    def check_refine_gate(self):
        """Shrink prediction noise (and optionally particle count) once particles cluster."""
        # get standard deviation of particle position
        sd_xyz = np.std(self.filter.particles['position'], axis=0)
        norm_std = np.linalg.norm(sd_xyz)
        refining_used = False
        print("sd_xyz:", sd_xyz)
        print("norm sd_xyz:", np.linalg.norm(sd_xyz))

        if norm_std < self.alpha_super_refine:
            print("SUPER REFINE MODE ON")
            # reduce original noise by a factor of 4
            self.set_noise(scale = 4.0)
            refining_used = True
        elif norm_std < self.alpha_refine:
            print("REFINE MODE ON")
            # reduce original noise by a factor of 2
            self.set_noise(scale = 2.0)
            refining_used = True
        else:
            # reset noise to original value
            self.set_noise(scale = 1.0)

        if refining_used and self.use_particle_reduction:
            self.filter.reduce_num_particles(self.min_number_particles)
            self.num_particles = self.min_number_particles

    def publish_pose_est(self, pose_est_gtsam, img_timestamp = None):
        """Publish a 4x4 pose matrix as an Odometry message in the world frame."""
        pose_est = Odometry()
        pose_est.header.frame_id = "world"
        # if we don't run on rosbag data then we don't have timestamps
        if img_timestamp is not None:
            pose_est.header.stamp = img_timestamp
        pose_est_gtsam = gtsam.Pose3(pose_est_gtsam)
        position_est = pose_est_gtsam.translation()
        # gtsam quaternion order is [w, x, y, z], matching the assignments below
        rot_est = pose_est_gtsam.rotation().quaternion()

        # populate msg with pose information
        pose_est.pose.pose.position.x = position_est[0]
        pose_est.pose.pose.position.y = position_est[1]
        pose_est.pose.pose.position.z = position_est[2]
        pose_est.pose.pose.orientation.w = rot_est[0]
        pose_est.pose.pose.orientation.x = rot_est[1]
        pose_est.pose.pose.orientation.y = rot_est[2]
        pose_est.pose.pose.orientation.z = rot_est[3]
        # print(pose_est_gtsam.rotation().ypr())

        # publish pose
        self.pose_pub.publish(pose_est)

    def visualize(self):
        """Publish the particle cloud (and the ground-truth pose, if known) for rviz."""
        # publish pose array of particles' poses
        poses = []
        R_nerf_body = gtsam.Rot3.Rx(-np.pi/2)  # NOTE(review): computed but never used below
        for index, particle in enumerate(self.filter.particles['position']):
            p = Pose()
            p.position.x = particle[0]
            p.position.y = particle[1]
            p.position.z = particle[2]
            # print(particle[3],particle[4],particle[5])
            rot = self.filter.particles['rotation'][index]
            orient = rot.quaternion()
            p.orientation.w = orient[0]
            p.orientation.x = orient[1]
            p.orientation.y = orient[2]
            p.orientation.z = orient[3]
            poses.append(p)

        pa = PoseArray()
        pa.poses = poses
        pa.header.frame_id = "world"
        self.particle_pub.publish(pa)

        # if we have a ground truth pose then publish it
        if not self.use_received_image or self.gt_pose is not None:
            gt_array = PoseArray()
            gt = Pose()
            gt_rot = gtsam.Rot3(self.gt_pose[0:3,0:3]).quaternion()
            gt.orientation.w = gt_rot[0]
            gt.orientation.x = gt_rot[1]
            gt.orientation.y = gt_rot[2]
            gt.orientation.z = gt_rot[3]
            gt.position.x = self.gt_pose[0,3]
            gt.position.y = self.gt_pose[1,3]
            gt.position.z = self.gt_pose[2,3]
            gt_array.poses = [gt]
            gt_array.header.frame_id = "world"
            self.gt_pub.publish(gt_array)
def average_arrays(axis_list, num_points=100):
    """Average several (x, y) series sampled on different x-grids and plot the mean curve.

    adapted from https://stackoverflow.com/questions/49037902/how-to-interpolate-a-line-between-two-other-lines-in-python/49041142#49041142

    Args:
        axis_list: two parallel lists, [x_series, y_series], where x_series[i]
            and y_series[i] are same-length sequences for run i
            (here: forward-pass counts and accuracy per run).
        num_points: number of resample points per series (previously a
            hard-coded 100); default preserves the original behavior.

    Side effect: shows a matplotlib plot; the mean x-axis is scaled to
    thousands of forward passes.
    """
    x_series_list, y_series_list = axis_list[0], axis_list[1]
    n_series = len(x_series_list)

    # Resample every series onto its own uniform grid of num_points points.
    min_max_xs = [(min(axis), max(axis)) for axis in x_series_list]
    new_axis_xs = [np.linspace(min_x, max_x, num_points) for min_x, max_x in min_max_xs]
    new_axis_ys = [np.interp(new_axis_xs[i], x_series_list[i], y_series_list[i])
                   for i in range(n_series)]

    # Point-wise mean across series; x is reported in thousands of passes.
    midx = [np.mean([new_axis_xs[axis_idx][i] for axis_idx in range(n_series)])/1000.0 for i in range(num_points)]
    midy = [np.mean([new_axis_ys[axis_idx][i] for axis_idx in range(n_series)]) for i in range(num_points)]

    plt.plot(midx, midy)
    plt.xlabel("number of forward passes (in thousands)")
    plt.grid()
    plt.show()
if __name__ == "__main__":
    rospy.init_node("nav_node")
    run_inerf_compare = rospy.get_param("run_inerf_compare")
    use_logged_start = rospy.get_param("use_logged_start")
    log_directory = rospy.get_param("log_directory")

    if run_inerf_compare:
        # Offline iNeRF-style evaluation: several random (or logged) starts
        # per LLFF dataset, each run until a forward-pass budget is exhausted.
        num_starts_per_dataset = 5 # TODO make this a param
        datasets = ['fern', 'horns', 'fortress', 'room'] # TODO make this a param
        # datasets = ['fern']
        total_position_error_good = []
        total_rotation_error_good = []
        total_num_forward_passes = []
        for dataset_index, dataset_name in enumerate(datasets):
            print("Starting iNeRF Style Test on Dataset: ", dataset_name)
            if use_logged_start:
                start_pose_files = glob.glob(log_directory + "/initial_pose_" + dataset_name + '_' +'*')
            # only use an image number once per dataset
            used_img_nums = set()
            for i in range(num_starts_per_dataset):
                if not use_logged_start:
                    img_num = np.random.randint(low=0, high=20) # TODO can increase the range of images
                    while img_num in used_img_nums:
                        img_num = np.random.randint(low=0, high=20)
                    used_img_nums.add(img_num)
                else:
                    start_file = start_pose_files[i]
                    # NOTE(review): parses the image number from the filename by
                    # underscore position — breaks if log_directory itself
                    # contains underscores; verify the path layout.
                    img_num = int(start_file.split('_')[5])
                mcl_local = Navigator(img_num, dataset_name)
                print()
                print("Using Image Number:", mcl_local.obs_img_num)
                print("Test", i+1, "out of", num_starts_per_dataset)

                # Forward-pass count after each update; starts at 0 so the
                # initial errors are logged before the first update runs.
                num_forward_passes_per_iteration = [0]
                position_error_good = []
                rotation_error_good = []
                ii = 0
                while num_forward_passes_per_iteration[-1] < mcl_local.forward_passes_limit:
                    print()
                    print("forward pass limit, current number forward passes:", mcl_local.forward_passes_limit, num_forward_passes_per_iteration[-1])
                    position_error_good.append(int(mcl_local.check_if_position_error_good()))
                    rotation_error_good.append(int(mcl_local.check_if_rotation_error_good()))
                    if ii != 0:
                        # 'temp' is a placeholder msg — presumably unused since
                        # use_received_image is false in this mode; confirm.
                        mcl_local.rgb_run('temp')
                        # Each update costs particles * samples-per-ray * rays.
                        num_forward_passes_per_iteration.append(num_forward_passes_per_iteration[ii-1] + mcl_local.num_particles * (mcl_local.course_samples + mcl_local.fine_samples) * mcl_local.batch_size)
                    ii += 1

                if mcl_local.log_results:
                    with open(mcl_local.log_directory + "/" + "mocnerf_" + mcl_local.log_prefix + "_" + mcl_local.model_name + "_" + str(mcl_local.obs_img_num) + "_" + "poses.npy", 'wb') as f:
                        np.save(f, np.array(mcl_local.all_pose_est))
                    with open(mcl_local.log_directory + "/" + "mocnerf_" + mcl_local.log_prefix + "_" + mcl_local.model_name + "_" + str(mcl_local.obs_img_num) + "_" + "forward_passes.npy", 'wb') as f:
                        np.save(f, np.array(num_forward_passes_per_iteration))
                total_num_forward_passes.append(num_forward_passes_per_iteration)
                total_position_error_good.append(position_error_good)
                total_rotation_error_good.append(rotation_error_good)

        # Plot mean success-rate curves across all runs.
        average_arrays([total_num_forward_passes, total_position_error_good])
        average_arrays([total_num_forward_passes, total_rotation_error_good])

    # run normal live ROS mode
    else:
        mcl = Navigator()
        while not rospy.is_shutdown():
            # Process the most recent cached image, then clear it.
            if mcl.img_msg is not None:
                mcl.rgb_run(mcl.img_msg)
                mcl.img_msg = None # TODO not thread safe
================================================
FILE: src/navigator_base.py
================================================
import rospy
import numpy as np
import warnings
from full_filter import NeRF
# Base class to handle loading params from yaml.
class NavigatorBase:
    """Loads all ROS parameters, constructs the NeRF wrapper, and initializes
    the bookkeeping state shared by Navigator subclasses.
    """
    def __init__(self, img_num=0, dataset_name=None):
        # extract params
        # --- camera / dataset parameters ---
        self.factor = rospy.get_param('factor')
        self.focal = rospy.get_param('focal')
        self.H = rospy.get_param('H')
        self.W = rospy.get_param('W')
        self.dataset_type = rospy.get_param('dataset_type')
        # --- particle filter and topics ---
        self.num_particles = rospy.get_param('num_particles')
        self.plot_particles = rospy.get_param('visualize_particles')
        self.rgb_topic = rospy.get_param('rgb_topic')
        self.pose_topic = rospy.get_param('vio_topic')
        # --- NeRF rendering parameters ---
        self.near = rospy.get_param('near')
        self.far = rospy.get_param('far')
        # NOTE(review): 'course_samples' is the coarse-network sample count
        # (sic — the misspelled key is used consistently across the configs).
        self.course_samples = rospy.get_param('course_samples')
        self.fine_samples = rospy.get_param('fine_samples')
        self.batch_size = rospy.get_param('batch_size')
        self.kernel_size = rospy.get_param('kernel_size')
        self.lrate = rospy.get_param('lrate')
        self.sampling_strategy = rospy.get_param('sampling_strategy')
        self.no_ndc = rospy.get_param('no_ndc')
        self.dil_iter = rospy.get_param('dil_iter')
        self.multires = rospy.get_param('multires')
        self.multires_views = rospy.get_param('multires_views')
        self.i_embed = rospy.get_param('i_embed')
        self.netwidth = rospy.get_param('netwidth')
        self.netdepth = rospy.get_param('netdepth')
        self.netdepth_fine = rospy.get_param('netdepth_fine')
        self.netwidth_fine = rospy.get_param('netwidth_fine')
        self.use_viewdirs = rospy.get_param('use_viewdirs')
        self.perturb = rospy.get_param('perturb')
        self.white_bkgd = rospy.get_param('white_bkgd')
        self.raw_noise_std = rospy.get_param('raw_noise_std')
        self.lindisp = rospy.get_param('lindisp')
        self.netchunk = rospy.get_param('netchunk')
        self.chunk = rospy.get_param('chunk')
        self.bd_factor = rospy.get_param('bd_factor')
        self.log_prefix = rospy.get_param('log_prefix')

        # just used for Nerf-Navigation comparison
        self.model_ngp = None
        self.ngp_opt = None

        # Dataset name can be forced by the caller (iNeRF-compare loop).
        if dataset_name is not None:
            self.model_name = dataset_name
        else:
            self.model_name = rospy.get_param('model_name')
        self.data_dir = rospy.get_param('data_dir') + "/" + self.model_name
        self.ckpt_dir = rospy.get_param('ckpt_dir') + "/" + self.model_name
        self.obs_img_num = img_num

        # TODO these don't individually need to be part of the navigator class
        nerf_params = {'near':self.near, 'far':self.far, 'course_samples':self.course_samples, 'fine_samples':self.fine_samples,
                       'batch_size':self.batch_size, 'factor':self.factor, 'focal':self.focal, 'H':self.H, 'W':self.W, 'dataset_type':self.dataset_type,
                       'obs_img_num':self.obs_img_num, 'kernel_size':self.kernel_size, 'lrate':self.lrate, 'sampling_strategy':self.sampling_strategy,
                       'model_name':self.model_name, 'data_dir':self.data_dir, 'no_ndc':self.no_ndc, 'dil_iter':self.dil_iter,
                       'multires':self.multires, 'multires_views':self.multires_views, 'i_embed':self.i_embed, 'netwidth':self.netwidth, 'netdepth':self.netdepth,
                       'netdepth_fine':self.netdepth_fine, 'netwidth_fine':self.netwidth_fine, 'use_viewdirs':self.use_viewdirs, 'ckpt_dir':self.ckpt_dir,
                       'perturb':self.perturb, 'white_bkgd':self.white_bkgd, 'raw_noise_std':self.raw_noise_std, 'lindisp':self.lindisp,
                       'netchunk':self.netchunk, 'chunk':self.chunk, 'bd_factor':self.bd_factor}

        # Wrapper that loads the model and renders/evaluates losses (full_filter.NeRF).
        self.nerf = NeRF(nerf_params)
        self.image = None
        self.rgb_input_count = 0
        self.num_updates = 0
        self.photometric_loss = rospy.get_param('photometric_loss')
        self.view_debug_image_iteration = rospy.get_param('view_debug_image_iteration')

        # --- prediction-step noise ---
        self.px_noise = rospy.get_param('px_noise')
        self.py_noise = rospy.get_param('py_noise')
        self.pz_noise = rospy.get_param('pz_noise')
        self.rot_x_noise = rospy.get_param('rot_x_noise')
        self.rot_y_noise = rospy.get_param('rot_y_noise')
        self.rot_z_noise = rospy.get_param('rot_z_noise')

        self.use_convergence_protection = rospy.get_param('use_convergence_protection')
        self.number_convergence_particles = rospy.get_param('number_convergence_particles')
        self.convergence_noise = rospy.get_param('convergence_noise')

        self.use_weighted_avg = rospy.get_param('use_weighted_avg')

        # --- refine gate / particle reduction ---
        self.min_number_particles = rospy.get_param('min_number_particles')
        self.use_particle_reduction = rospy.get_param('use_particle_reduction')
        self.alpha_refine = rospy.get_param('alpha_refine')
        self.alpha_super_refine = rospy.get_param('alpha_super_refine')

        # --- run modes ---
        self.run_predicts = rospy.get_param('run_predicts')
        self.use_received_image = rospy.get_param('use_received_image')
        self.run_inerf_compare = rospy.get_param('run_inerf_compare')
        self.global_loc_mode = rospy.get_param('global_loc_mode')
        self.run_nerfnav_compare = rospy.get_param('run_nerfnav_compare')
        self.nerf_nav_directory = rospy.get_param('nerf_nav_directory')
        self.center_about_true_pose = rospy.get_param('center_about_true_pose')
        self.use_refining = rospy.get_param('use_refining')

        # --- logging ---
        self.log_results = rospy.get_param('log_results')
        self.log_directory = rospy.get_param('log_directory')
        self.use_logged_start = rospy.get_param('use_logged_start')
        self.forward_passes_limit = rospy.get_param('forward_passes_limit')

        # --- initial distribution bounds and frame alignment ---
        self.min_bounds = rospy.get_param('min_bounds')
        self.max_bounds = rospy.get_param('max_bounds')
        self.R_bodyVins_camNerf = rospy.get_param('R_bodyVins_camNerf')

        self.previous_vio_pose = None
        self.nerf_pose = None
        self.all_pose_est = [] # plus 1 since we put in the initial pose before the first update
        self.img_msg = None

        # for now only have gt pose for llff dataset for inerf comparison and nerf-nav comparison
        self.gt_pose = None
        if not self.use_received_image:
            self.gt_pose = np.copy(self.nerf.obs_img_pose)

        self.check_params()

    def check_params(self):
        """
        Useful helper function to check if suspicious or invalid params are being used.
        TODO: Not all bad combinations of params are currently checked here.
        """
        if self.alpha_super_refine > self.alpha_refine:
            warnings.warn("alpha_super_refine is larger than alpha_refine, code will run but they are probably flipped by the user")
        if self.sampling_strategy != "random":
            warnings.warn("did not enter a valid sampling strategy. Currently the following are supported: random")
        if self.photometric_loss != "rgb":
            warnings.warn("did not enter a valid photometric loss. Currently the following are supported: rgb")
================================================
FILE: src/particle_filter.py
================================================
import numpy as np
import gtsam
from multiprocessing import Lock
class ParticleFilter:
def __init__(self, initial_particles):
self.num_particles=len(initial_particles['position'])
self.particles = initial_particles
self.weights=np.ones(self.num_particles)
self.particle_lock = Lock()
def reduce_num_particles(self, num_particles):
self.particle_lock.acquire()
self.num_particles = num_particles
self.weights = self.weights[0:num_particles]
self.particles['position'] = self.particles['position'][0:num_particles]
self.particles['rotation'] = self.particles['rotation'][0:num_particles]
self.particle_lock.release()
def predict_no_motion(self, p_x, p_y, p_z, r_x, r_y, r_z):
self.particle_lock.acquire()
self.particles['position'][:,0] += p_x * np.random.normal(size = (self.particles['position'].shape[0]))
self.particles['position'][:,1] += p_y * np.random.normal(size = (self.particles['position'].shape[0]))
self.particles['position'][:,2] += p_z * np.random.normal(size = (self.particles['position'].shape[0]))
# TODO see if this can be made faster
for i in range(len(self.particles['rotation'])):
n1 = r_x * np.random.normal()
n2 = r_y * np.random.normal()
n3 = r_z * np.random.normal()
self.particles['rotation'][i] = self.particles['rotation'][i].retract(np.array([n1, n2, n3]))
self.particle_lock.release()
def predict_with_delta_pose(self, delta_pose, p_x, p_y, p_z, r_x, r_y, r_z):
self.particle_lock.acquire()
# TODO see if this can be made faster
delta_rot_t_tp1= delta_pose.rotation()
for i in range(len(self.particles['rotation'])):
# TODO do rotation in gtsam without casting to matrix
pose = gtsam.Pose3(self.particles['rotation'][i], self.particles['position'][i])
new_pose = gtsam.Pose3(pose.matrix() @ delta_pose.matrix())
new_position = new_pose.translation()
self.particles['position'][i][0] = new_position[0]
self.particles['position'][i][1] = new_position[1]
self.particles['position'][i][2] = new_position[2]
self.particles['rotation'][i] = new_pose.rotation()
n1 = r_x * np.random.normal()
n2 = r_y * np.random.normal()
n3 = r_z * np.random.normal()
self.particles['rotation'][i] = gtsam.Rot3(self.particles['rotation'][i].retract(np.array([n1, n2, n3])).matrix())
self.particles['position'][:,0] += (p_x * np.random.normal(size = (self.particles['position'].shape[0])))
self.particles['position'][:,1] += (p_y * np.random.normal(size = (self.particles['position'].shape[0])))
self.particles['position'][:,2] += (p_z * np.random.normal(size = (self.particles['position'].shape[0])))
self.particle_lock.release()
def update(self):
    """Measurement update: sharpen the weights (raise to the 4th power),
    normalize them into a probability distribution, then resample the
    particle set with replacement in proportion to weight."""
    # use fourth power
    self.weights = np.square(self.weights)
    self.weights = np.square(self.weights)
    # normalize weights
    sum_weights=np.sum(self.weights)
    # print("pre-normalized weight sum", sum_weights)
    self.weights=self.weights / sum_weights
    #resample
    self.particle_lock.acquire()
    # Sample particle indices with probability proportional to weight.
    choice = np.random.choice(self.num_particles, self.num_particles, p = self.weights, replace=True)
    # Copy before indexing so resampled particles do not alias the old arrays.
    temp = {'position':np.copy(self.particles['position'])[choice, :], 'rotation':np.copy(self.particles['rotation'])[choice]}
    self.particles = temp
    self.particle_lock.release()
def compute_simple_position_average(self):
    """Unweighted mean of all particle positions (no weights, no clustering)."""
    return np.mean(self.particles['position'], axis=0)
def compute_weighted_position_average(self):
    """Mean of particle positions weighted by the current particle weights."""
    weighted_mean = np.average(self.particles['position'], weights=self.weights, axis=0)
    return weighted_mean
def compute_simple_rotation_average(self):
    """Geodesic L2 mean of the particle rotations (unweighted).

    Implements Algorithm 1 of Hartley et al., "Rotation Averaging" (IJCV),
    section 5.3: iteratively average the log-map residuals in the tangent
    space of the current estimate and step along the exponential map.

    Returns:
        gtsam.Rot3 average rotation. Fix: if the tolerance is not reached
        within max_iters, the last iterate is returned instead of the
        original behavior of falling off the end and returning None.
    """
    epsilon = 0.000001  # convergence tolerance on the mean tangent vector
    max_iters = 300
    rotations = self.particles['rotation']
    R = rotations[0]  # initialize at an arbitrary particle's rotation
    for i in range(max_iters):
        rot_sum = np.zeros((3))
        for rot in rotations:
            # Residual of each rotation in the tangent space at R.
            rot_sum = rot_sum + gtsam.Rot3.Logmap(gtsam.Rot3(R.transpose() @ rot.matrix()))
        r = rot_sum / len(rotations)
        if np.linalg.norm(r) < epsilon:
            return R
        # TODO do the matrix math in gtsam to avoid all the type casting
        R = gtsam.Rot3(R.matrix() @ gtsam.Rot3.Expmap(r).matrix())
    # Not converged within max_iters: return the best estimate so far
    # rather than implicitly returning None.
    return R
================================================
FILE: src/render_helpers.py
================================================
import os
import torch
from tqdm import tqdm
import time
import imageio
import numpy as np
import torch.nn.functional as F
#from torchsearchsorted import searchsorted
from utils import to8b
DEBUG = False
# most of this script is from iNeRF https://github.com/salykovaa/inerf
# and NeRF-Pytorch https://github.com/yenchenlin/nerf-pytorch/blob/master/load_llff.py
# Ray helpers
def get_rays(H, W, focal, c2w):
    """Compute camera rays through every pixel of an H x W image (torch).

    Args:
        H, W: image height and width in pixels.
        focal: focal length in pixels.
        c2w: camera-to-world transform; only the top 3x4 part is used.
    Returns:
        (rays_o, rays_d): both [H, W, 3]. Origins are the camera center;
        directions are in the world frame and are not normalized.
    """
    grid_x, grid_y = torch.meshgrid(torch.linspace(0, W-1, W), torch.linspace(0, H-1, H))  # pytorch's meshgrid has indexing='ij'
    grid_x, grid_y = grid_x.t(), grid_y.t()  # transpose to 'xy' (row = image row)
    # Pixel -> camera-frame direction under the pinhole model (-z forward).
    cam_dirs = torch.stack([(grid_x-W*.5)/focal, -(grid_y-H*.5)/focal, -torch.ones_like(grid_x)], -1)
    # Rotate into the world frame: per-pixel dot products with R = c2w[:3,:3].
    rays_d = torch.sum(cam_dirs[..., np.newaxis, :] * c2w[:3,:3], -1)
    # Every ray starts at the camera center.
    rays_o = c2w[:3,-1].expand(rays_d.shape)
    return rays_o, rays_d
def get_rays_np(H, W, focal, c2w):
    """NumPy twin of get_rays: per-pixel ray origins and directions.

    Returns (rays_o, rays_d), both [H, W, 3]; rays_o is a broadcast view of
    the camera center, rays_d is in the world frame (not normalized).
    """
    px, py = np.meshgrid(np.arange(W, dtype=np.float32), np.arange(H, dtype=np.float32), indexing='xy')
    # Pinhole-model direction in the camera frame (-z forward).
    cam_dirs = np.stack([(px - W * .5) / focal, -(py - H * .5) / focal, -np.ones_like(px)], -1)
    # Rotate into the world frame (per-pixel dot product with c2w[:3,:3]).
    rays_d = np.sum(cam_dirs[..., np.newaxis, :] * c2w[:3, :3], -1)
    # All rays share the camera center as origin.
    rays_o = np.broadcast_to(c2w[:3, -1], rays_d.shape)
    return rays_o, rays_d
def ndc_rays(H, W, focal, near, rays_o, rays_d):
    """Map rays into normalized device coordinates (forward-facing scenes).

    Args:
        H, W, focal: camera intrinsics.
        near: distance to the near plane.
        rays_o, rays_d: [..., 3] ray origins and directions.
    Returns:
        (rays_o, rays_d) re-expressed in NDC space.
    """
    # Slide each origin along its ray onto the z = -near plane.
    shift = -(near + rays_o[..., 2]) / rays_d[..., 2]
    rays_o = rays_o + shift[..., None] * rays_d
    # Perspective-project the origins.
    o0 = -1. / (W / (2. * focal)) * rays_o[..., 0] / rays_o[..., 2]
    o1 = -1. / (H / (2. * focal)) * rays_o[..., 1] / rays_o[..., 2]
    o2 = 1. + 2. * near / rays_o[..., 2]
    # Project the directions (difference of projected endpoints).
    d0 = -1. / (W / (2. * focal)) * (rays_d[..., 0] / rays_d[..., 2] - rays_o[..., 0] / rays_o[..., 2])
    d1 = -1. / (H / (2. * focal)) * (rays_d[..., 1] / rays_d[..., 2] - rays_o[..., 1] / rays_o[..., 2])
    d2 = -2. * near / rays_o[..., 2]
    return torch.stack([o0, o1, o2], -1), torch.stack([d0, d1, d2], -1)
# Hierarchical sampling (section 5.2)
def sample_pdf(bins, weights, N_samples, det=False, pytest=False):
    """Hierarchical sampling (NeRF section 5.2): draw N_samples depths per ray
    by inverse-transform sampling the piecewise-linear CDF built from `weights`
    over `bins`.

    Args:
        bins: [batch, M] bin edge positions along each ray.
        weights: [batch, M-1] unnormalized per-bin weights.
        N_samples: number of samples to draw per ray.
        det: if True, place samples at evenly spaced CDF values instead of
            drawing them uniformly at random.
        pytest: if True, replace the random draws with seeded numpy values
            for reproducible tests.
    Returns:
        samples: [batch, N_samples] sampled positions.
    """
    # Get pdf
    weights = weights + 1e-5 # prevent nans
    pdf = weights / torch.sum(weights, -1, keepdim=True)
    cdf = torch.cumsum(pdf, -1)
    # Prepend zero so the CDF starts at 0 and has len(bins) entries.
    cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1)  # (batch, len(bins))
    # Take uniform samples
    if det:
        u = torch.linspace(0., 1., steps=N_samples)
        u = u.expand(list(cdf.shape[:-1]) + [N_samples])
    else:
        u = torch.rand(list(cdf.shape[:-1]) + [N_samples])
    # Pytest, overwrite u with numpy's fixed random numbers
    if pytest:
        print("pytest is true")
        np.random.seed(0)
        new_shape = list(cdf.shape[:-1]) + [N_samples]
        if det:
            u = np.linspace(0., 1., N_samples)
            u = np.broadcast_to(u, new_shape)
        else:
            u = np.random.rand(*new_shape)
        u = torch.Tensor(u)
    # Invert CDF: find, for each u, the CDF interval it falls into.
    u = u.contiguous()
    inds = torch.searchsorted(cdf, u, right=True)
    below = torch.max(torch.zeros_like(inds - 1), inds - 1)  # clamp lower index at 0
    above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds)  # clamp upper index
    inds_g = torch.stack([below, above], -1)  # (batch, N_samples, 2)
    # cdf_g = tf.gather(cdf, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
    # bins_g = tf.gather(bins, inds_g, axis=-1, batch_dims=len(inds_g.shape)-2)
    matched_shape = [inds_g.shape[0], inds_g.shape[1], cdf.shape[-1]]
    cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
    bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
    denom = (cdf_g[..., 1] - cdf_g[..., 0])
    # Guard against zero-width CDF intervals before dividing.
    denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom)
    t = (u - cdf_g[..., 0]) / denom
    # Linear interpolation within the selected bin.
    samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
    return samples
def raw2outputs(raw, z_vals, rays_d, raw_noise_std=0, white_bkgd=False, pytest=False):
    """Transforms model's predictions to semantically meaningful values.
    Args:
        raw: [num_rays, num_samples along ray, 4]. Prediction from model.
        z_vals: [num_rays, num_samples along ray]. Integration time.
        rays_d: [num_rays, 3]. Direction of each ray.
    Returns:
        rgb_map: [num_rays, 3]. Estimated RGB color of a ray.
        disp_map: [num_rays]. Disparity map. Inverse of depth map.
        acc_map: [num_rays]. Sum of weights along each ray.
        weights: [num_rays, num_samples]. Weights assigned to each sampled color.
        depth_map: [num_rays]. Estimated distance to object.
    """
    # Raw density -> alpha: alpha = 1 - exp(-relu(sigma) * dist).
    raw2alpha = lambda raw, dists, act_fn=F.relu: 1.-torch.exp(-act_fn(raw)*dists)
    # Distances between consecutive samples; the last interval is effectively infinite.
    dists = z_vals[...,1:] - z_vals[...,:-1]
    dists = torch.cat([dists, torch.Tensor([1e10]).expand(dists[...,:1].shape)], -1)  # [N_rays, N_samples]
    # Scale by the ray direction norm so distances are in world units.
    dists = dists * torch.norm(rays_d[...,None,:], dim=-1)
    rgb = torch.sigmoid(raw[...,:3])  # [N_rays, N_samples, 3]
    noise = 0.
    if raw_noise_std > 0.:
        # Regularization noise added to the predicted density.
        noise = torch.randn(raw[...,3].shape) * raw_noise_std
        # Overwrite randomly sampled data if pytest
        if pytest:
            np.random.seed(0)
            noise = np.random.rand(*list(raw[...,3].shape)) * raw_noise_std
            noise = torch.Tensor(noise)
    alpha = raw2alpha(raw[...,3] + noise, dists)  # [N_rays, N_samples]
    # weights = alpha * tf.math.cumprod(1.-alpha + 1e-10, -1, exclusive=True)
    # Transmittance-weighted compositing; the prepended column of ones makes
    # the cumulative product exclusive.
    weights = alpha * torch.cumprod(torch.cat([torch.ones((alpha.shape[0], 1)), 1.-alpha + 1e-10], -1), -1)[:, :-1]
    rgb_map = torch.sum(weights[...,None] * rgb, -2)  # [N_rays, 3]
    depth_map = torch.sum(weights * z_vals, -1)
    # Disparity = 1 / expected depth, clamped away from division by zero.
    disp_map = 1./torch.max(1e-10 * torch.ones_like(depth_map), depth_map / torch.sum(weights, -1))
    acc_map = torch.sum(weights, -1)
    if white_bkgd:
        # Composite remaining transmittance onto a white background.
        rgb_map = rgb_map + (1.-acc_map[...,None])
    return rgb_map, disp_map, acc_map, weights, depth_map
def render_rays(ray_batch,
                network_fn,
                network_query_fn,
                N_samples,
                retraw=False,
                lindisp=False,
                perturb=0.,
                N_importance=0,
                network_fine=None,
                white_bkgd=False,
                raw_noise_std=0.,
                verbose=False,
                pytest=False):
    """Volumetric rendering.
    Args:
      ray_batch: array of shape [batch_size, ...]. All information necessary
        for sampling along a ray, including: ray origin, ray direction, min
        dist, max dist, and unit-magnitude viewing direction.
      network_fn: function. Model for predicting RGB and density at each point
        in space.
      network_query_fn: function used for passing queries to network_fn.
      N_samples: int. Number of different times to sample along each ray.
      retraw: bool. If True, include model's raw, unprocessed predictions.
      lindisp: bool. If True, sample linearly in inverse depth rather than in depth.
      perturb: float, 0 or 1. If non-zero, each ray is sampled at stratified
        random points in time.
      N_importance: int. Number of additional times to sample along each ray.
        These samples are only passed to network_fine.
      network_fine: "fine" network with same spec as network_fn.
      white_bkgd: bool. If True, assume a white background.
      raw_noise_std: ...
      verbose: bool. If True, print more debugging info.
    Returns:
      rgb_map: [num_rays, 3]. Estimated RGB color of a ray. Comes from fine model.
      disp_map: [num_rays]. Disparity map. 1 / depth.
      acc_map: [num_rays]. Accumulated opacity along each ray. Comes from fine model.
      raw: [num_rays, num_samples, 4]. Raw predictions from model.
      rgb0: See rgb_map. Output for coarse model.
      disp0: See disp_map. Output for coarse model.
      acc0: See acc_map. Output for coarse model.
      z_std: [num_rays]. Standard deviation of distances along ray for each
        sample.
    """
    # Unpack the packed ray representation produced by render().
    N_rays = ray_batch.shape[0]
    rays_o, rays_d = ray_batch[:,0:3], ray_batch[:,3:6] # [N_rays, 3] each
    viewdirs = ray_batch[:,-3:] if ray_batch.shape[-1] > 8 else None
    bounds = torch.reshape(ray_batch[...,6:8], [-1,1,2])
    near, far = bounds[...,0], bounds[...,1] # [-1,1]
    # Coarse sample positions between near and far along each ray.
    t_vals = torch.linspace(0., 1., steps=N_samples)
    if not lindisp:
        z_vals = near * (1.-t_vals) + far * (t_vals)
    else:
        # Sample linearly in inverse depth (disparity).
        z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals))
    z_vals = z_vals.expand([N_rays, N_samples])
    if perturb > 0.:
        # get intervals between samples
        mids = .5 * (z_vals[...,1:] + z_vals[...,:-1])
        upper = torch.cat([mids, z_vals[...,-1:]], -1)
        lower = torch.cat([z_vals[...,:1], mids], -1)
        # stratified samples in those intervals
        t_rand = torch.rand(z_vals.shape)
        # Pytest, overwrite u with numpy's fixed random numbers
        if pytest:
            np.random.seed(0)
            t_rand = np.random.rand(*list(z_vals.shape))
            t_rand = torch.Tensor(t_rand)
        z_vals = lower + (upper - lower) * t_rand
    pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples, 3]
    # raw = run_network(pts)
    raw = network_query_fn(pts, viewdirs, network_fn)
    rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest)
    if N_importance > 0:
        # Hierarchical (fine) pass: importance-sample where the coarse weights
        # are large, then re-render with the fine network.
        rgb_map_0, disp_map_0, acc_map_0 = rgb_map, disp_map, acc_map
        z_vals_mid = .5 * (z_vals[...,1:] + z_vals[...,:-1])
        z_samples = sample_pdf(z_vals_mid, weights[...,1:-1], N_importance, det=(perturb==0.), pytest=pytest)
        z_samples = z_samples.detach()
        z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)
        pts = rays_o[...,None,:] + rays_d[...,None,:] * z_vals[...,:,None] # [N_rays, N_samples + N_importance, 3]
        run_fn = network_fn if network_fine is None else network_fine
        # raw = run_network(pts, fn=run_fn)
        raw = network_query_fn(pts, viewdirs, run_fn)
        rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d, raw_noise_std, white_bkgd, pytest=pytest)
    ret = {'rgb_map' : rgb_map, 'disp_map' : disp_map, 'acc_map' : acc_map}
    if retraw:
        ret['raw'] = raw
    if N_importance > 0:
        ret['rgb0'] = rgb_map_0
        ret['disp0'] = disp_map_0
        ret['acc0'] = acc_map_0
        ret['z_std'] = torch.std(z_samples, dim=-1, unbiased=False)  # [N_rays]
    # Sanity check for numerical blow-ups (only reported when DEBUG is set).
    for k in ret:
        if (torch.isnan(ret[k]).any() or torch.isinf(ret[k]).any()) and DEBUG:
            print(f"! [Numerical Error] {k} contains nan or inf.")
    return ret
def batchify_rays(rays_flat, chunk=1024*32, **kwargs):
    """Render rays in smaller minibatches to avoid OOM.

    Splits rays_flat into chunks of `chunk` rays, forwards each through
    render_rays, and concatenates the per-key outputs along dim 0.
    """
    collected = {}
    for start in range(0, rays_flat.shape[0], chunk):
        chunk_ret = render_rays(rays_flat[start:start + chunk], **kwargs)
        for key, val in chunk_ret.items():
            collected.setdefault(key, []).append(val)
    return {key: torch.cat(vals, 0) for key, vals in collected.items()}
def render(H, W, focal, chunk=1024*32, rays=None, c2w=None, ndc=True,
           near=0., far=1.,
           use_viewdirs=False, c2w_staticcam=None,
           **kwargs):
    """Render rays
    Args:
      H: int. Height of image in pixels.
      W: int. Width of image in pixels.
      focal: float. Focal length of pinhole camera.
      chunk: int. Maximum number of rays to process simultaneously. Used to
        control maximum memory usage. Does not affect final results.
      rays: array of shape [2, batch_size, 3]. Ray origin and direction for
        each example in batch.
      c2w: array of shape [3, 4]. Camera-to-world transformation matrix.
      ndc: bool. If True, represent ray origin, direction in NDC coordinates.
      near: float or array of shape [batch_size]. Nearest distance for a ray.
      far: float or array of shape [batch_size]. Farthest distance for a ray.
      use_viewdirs: bool. If True, use viewing direction of a point in space in model.
      c2w_staticcam: array of shape [3, 4]. If not None, use this transformation matrix for
       camera while using other c2w argument for viewing directions.
    Returns:
      rgb_map: [batch_size, 3]. Predicted RGB values for rays.
      disp_map: [batch_size]. Disparity map. Inverse of depth.
      acc_map: [batch_size]. Accumulated opacity (alpha) along a ray.
      extras: dict with everything returned by render_rays().
    """
    if c2w is not None:
        # special case to render full image
        rays_o, rays_d = get_rays(H, W, focal, c2w)
    else:
        # use provided ray batch
        rays_o, rays_d = rays
    if use_viewdirs:
        # provide ray directions as input
        viewdirs = rays_d
        if c2w_staticcam is not None:
            # special case to visualize effect of viewdirs
            rays_o, rays_d = get_rays(H, W, focal, c2w_staticcam)
        # Normalize view directions to unit length before feeding the model.
        viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True)
        viewdirs = torch.reshape(viewdirs, [-1,3]).float()
    sh = rays_d.shape # [..., 3]
    if ndc:
        # for forward facing scenes
        rays_o, rays_d = ndc_rays(H, W, focal, 1., rays_o, rays_d)
    # Create ray batch: flatten and pack [origin | direction | near | far (| viewdir)].
    rays_o = torch.reshape(rays_o, [-1,3]).float()
    rays_d = torch.reshape(rays_d, [-1,3]).float()
    near, far = near * torch.ones_like(rays_d[...,:1]), far * torch.ones_like(rays_d[...,:1])
    rays = torch.cat([rays_o, rays_d, near, far], -1)
    if use_viewdirs:
        rays = torch.cat([rays, viewdirs], -1)
    # Render and reshape
    all_ret = batchify_rays(rays, chunk, **kwargs)
    for k in all_ret:
        # Restore the original spatial shape (e.g. [H, W, ...]).
        k_sh = list(sh[:-1]) + list(all_ret[k].shape[1:])
        all_ret[k] = torch.reshape(all_ret[k], k_sh)
    k_extract = ['rgb_map', 'disp_map', 'acc_map']
    ret_list = [all_ret[k] for k in k_extract]
    ret_dict = {k : all_ret[k] for k in all_ret if k not in k_extract}
    return ret_list + [ret_dict]
def render_path(render_poses, hwf, chunk, render_kwargs, gt_imgs=None, savedir=None, render_factor=0):
    """Render a sequence of camera poses, optionally downsampled and saved.

    Args:
        render_poses: iterable of camera-to-world matrices (at least 3x4 each).
        hwf: (H, W, focal) camera intrinsics.
        chunk: max rays per forward pass (memory control only).
        render_kwargs: extra keyword args forwarded to render().
        gt_imgs: only used by the commented-out PSNR snippet below.
        savedir: if set, each rendered RGB frame is written there as a PNG.
        render_factor: if non-zero, H, W and focal are divided by this factor
            to render faster at lower resolution.
    Returns:
        (rgbs, disps): stacked numpy arrays of rendered colors and disparities.
    """
    H, W, focal = hwf
    if render_factor!=0:
        # Render downsampled for speed
        H = H//render_factor
        W = W//render_factor
        focal = focal/render_factor
    rgbs = []
    disps = []
    t = time.time()
    for i, c2w in enumerate(tqdm(render_poses)):
        # Per-frame timing printout.
        print(i, time.time() - t)
        t = time.time()
        rgb, disp, acc, _ = render(H, W, focal, chunk=chunk, c2w=c2w[:3,:4], **render_kwargs)
        rgbs.append(rgb.cpu().numpy())
        disps.append(disp.cpu().numpy())
        if i==0:
            print(rgb.shape, disp.shape)
        """
        if gt_imgs is not None and render_factor==0:
            p = -10. * np.log10(np.mean(np.square(rgb.cpu().numpy() - gt_imgs[i])))
            print(p)
        """
        if savedir is not None:
            # Save the most recent frame as an 8-bit PNG.
            rgb8 = to8b(rgbs[-1])
            filename = os.path.join(savedir, '{:03d}.png'.format(i))
            imageio.imwrite(filename, rgb8)
    rgbs = np.stack(rgbs, 0)
    disps = np.stack(disps, 0)
    return rgbs, disps
================================================
FILE: src/utils.py
================================================
import torch
import numpy as np
import imageio
import cv2
import json
import os
# most of this script is adapted from iNeRF https://github.com/salykovaa/inerf
# and NeRF-Pytorch https://github.com/yenchenlin/nerf-pytorch/blob/master/load_llff.py
def rot_psi(phi):
    """4x4 homogeneous rotation about the x-axis by `phi` radians."""
    return np.array([
        [1, 0, 0, 0],
        [0, np.cos(phi), -np.sin(phi), 0],
        [0, np.sin(phi), np.cos(phi), 0],
        [0, 0, 0, 1]])

def rot_theta(th):
    """4x4 homogeneous rotation about the y-axis by `th` radians.

    NOTE(review): the sign placement (-sin in row 0) is the transpose of the
    textbook Ry(th); kept as-is because it matches the iNeRF convention used
    throughout this file.
    """
    return np.array([
        [np.cos(th), 0, -np.sin(th), 0],
        [0, 1, 0, 0],
        [np.sin(th), 0, np.cos(th), 0],
        [0, 0, 0, 1]])

def rot_phi(psi):
    """4x4 homogeneous rotation about the z-axis by `psi` radians."""
    return np.array([
        [np.cos(psi), -np.sin(psi), 0, 0],
        [np.sin(psi), np.cos(psi), 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1]])

def trans_t(x, y, z):
    """4x4 homogeneous translation by (x, y, z)."""
    return np.array([
        [1, 0, 0, x],
        [0, 1, 0, y],
        [0, 0, 1, z],
        [0, 0, 0, 1]])
def load_blender(data_dir, model_name, obs_img_num, half_res, white_bkgd, *kwargs):
    """Load one observation image, intrinsics and poses from a Blender-format
    dataset (transforms.json plus per-frame PNGs).

    NOTE(review): despite the name, *kwargs collects six POSITIONAL extras
    (phi, theta, psi, x, y, z) used to perturb the start pose — confirm
    callers always pass exactly six.

    Returns:
        (img_rgb uint8, [H, W, focal], start_pose, obs_img_pose)
    """
    with open(os.path.join(data_dir + str(model_name) + "/obs_imgs/", 'transforms.json'), 'r') as fp:
        meta = json.load(fp)
    frames = meta['frames']
    img_path = os.path.join(data_dir + str(model_name) + "/obs_imgs/", frames[obs_img_num]['file_path'] + '.png')
    img_rgba = imageio.imread(img_path)
    img_rgba = (np.array(img_rgba) / 255.).astype(np.float32) # rgba image of type float32
    H, W = img_rgba.shape[:2]
    camera_angle_x = float(meta['camera_angle_x'])
    # Focal length derived from the horizontal field of view.
    focal = .5 * W / np.tan(.5 * camera_angle_x)
    if white_bkgd:
        # Composite the alpha channel over a white background.
        img_rgb = img_rgba[..., :3] * img_rgba[..., -1:] + (1. - img_rgba[..., -1:])
    else:
        img_rgb = img_rgba[..., :3]
    if half_res:
        H = H // 2
        W = W // 2
        focal = focal / 2.
        img_rgb = cv2.resize(img_rgb, (W, H), interpolation=cv2.INTER_AREA)
    img_rgb = np.asarray(img_rgb*255, dtype=np.uint8)
    obs_img_pose = np.array(frames[obs_img_num]['transform_matrix']).astype(np.float32)
    phi, theta, psi, x, y, z = kwargs
    # Perturbed starting pose for the pose-estimation experiment.
    start_pose = trans_t(x, y, z) @ rot_phi(phi/180.*np.pi) @ rot_theta(theta/180.*np.pi) @ rot_psi(psi/180.*np.pi) @ obs_img_pose
    return img_rgb, [H, W, focal], start_pose, obs_img_pose # image of type uint8
def get_pose(phi, theta, psi, x, y, z, obs_img_pose, center_about_true_pose):
    """Build a 4x4 pose from Euler offsets (degrees) and a translation.

    When center_about_true_pose is set, the perturbation is composed on top
    of obs_img_pose; otherwise it is returned as an absolute pose.
    """
    if not center_about_true_pose:
        return trans_t(x, y, z) @ rot_phi(phi/180.*np.pi) @ rot_theta(theta/180.*np.pi) @ rot_psi(psi/180.*np.pi)
    # Compose the perturbation on top of the observed (ground-truth) pose.
    return trans_t(x, y, z) @ rot_phi(phi/180.*np.pi) @ rot_theta(theta/180.*np.pi) @ rot_psi(psi/180.*np.pi) @ obs_img_pose
def rgb2bgr(img_rgb):
    """Swap an RGB image's channel order to BGR (OpenCV's convention)."""
    return cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
def show_img(title, img_rgb): # img - rgb image
    """Display an RGB image in an OpenCV window; blocks until a key is pressed,
    then closes all OpenCV windows."""
    img_bgr = rgb2bgr(img_rgb)
    cv2.imshow(title, img_bgr)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def find_POI(img_rgb, num_points, DEBUG=False): # img - RGB image in range 0...255
    """Detect SIFT keypoints ("points of interest") in an RGB image.

    NOTE(review): num_points is accepted but never used — all deduplicated
    keypoints are returned regardless of its value.
    Returns integer pixel coordinates with duplicates removed (order is not
    preserved because a set is used for deduplication).
    """
    img = np.copy(img_rgb)
    img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    sift = cv2.SIFT_create()
    keypoints = sift.detect(img_gray, None)
    if DEBUG:
        img = cv2.drawKeypoints(img_gray, keypoints, img)
        show_img("Detected points", img)
    # Keypoint coordinates are floats; truncate to integer pixel indices.
    xy = [keypoint.pt for keypoint in keypoints]
    xy = np.array(xy).astype(int)
    # Remove duplicate points
    xy_set = set(tuple(point) for point in xy)
    xy = np.array([list(point) for point in xy_set]).astype(int)
    return xy # pixel coordinates
# Misc
def img2mse(x, y):
    """Mean squared error between two tensors."""
    return torch.mean((x - y) ** 2)

def mse2psnr(x):
    """Convert an MSE tensor to PSNR in dB (base-10 log via natural logs)."""
    return -10. * torch.log(x) / torch.log(torch.Tensor([10.]))

def to8b(x):
    """Clip a float array to [0, 1] and convert to uint8 in [0, 255]."""
    return (255 * np.clip(x, 0, 1)).astype(np.uint8)
# Load llff data
########## Slightly modified version of LLFF data loading code
########## see https://github.com/Fyusion/LLFF for original
def _minify(basedir, factors=[], resolutions=[]):
    """Create downsampled copies of basedir/images using ImageMagick's mogrify.

    For each integer factor an 'images_{f}' directory is created; for each
    [height, width] pair an 'images_{w}x{h}' directory. Directories that
    already exist are skipped. NOTE(review): the mutable default arguments
    are never mutated here, so they are safe as written.
    """
    # First pass: check whether any requested output directory is missing.
    needtoload = False
    for r in factors:
        imgdir = os.path.join(basedir, 'images_{}'.format(r))
        if not os.path.exists(imgdir):
            needtoload = True
    for r in resolutions:
        imgdir = os.path.join(basedir, 'images_{}x{}'.format(r[1], r[0]))
        if not os.path.exists(imgdir):
            needtoload = True
    if not needtoload:
        return
    from subprocess import check_output
    imgdir = os.path.join(basedir, 'images')
    imgs = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir))]
    imgs = [f for f in imgs if any([f.endswith(ex) for ex in ['JPG', 'jpg', 'png', 'jpeg', 'PNG']])]
    imgdir_orig = imgdir
    wd = os.getcwd()
    for r in factors + resolutions:
        if isinstance(r, int):
            # Integer factor -> percentage resize.
            name = 'images_{}'.format(r)
            resizearg = '{}%'.format(100. / r)
        else:
            # [height, width] -> explicit target resolution.
            name = 'images_{}x{}'.format(r[1], r[0])
            resizearg = '{}x{}'.format(r[1], r[0])
        imgdir = os.path.join(basedir, name)
        if os.path.exists(imgdir):
            continue
        print('Minifying', r, basedir)
        # Copy the originals, then resize them in place with mogrify.
        os.makedirs(imgdir)
        check_output('cp {}/* {}'.format(imgdir_orig, imgdir), shell=True)
        ext = imgs[0].split('.')[-1]
        args = ' '.join(['mogrify', '-resize', resizearg, '-format', 'png', '*.{}'.format(ext)])
        print(args)
        os.chdir(imgdir)
        check_output(args, shell=True)
        os.chdir(wd)
        if ext != 'png':
            # mogrify -format png leaves the originals behind; delete them.
            check_output('rm {}/*.{}'.format(imgdir, ext), shell=True)
            print('Removed duplicates')
        print('Done')
def _load_data(basedir, factor=None, width=None, height=None, load_imgs=True):
    """Load LLFF poses/bounds from poses_bounds.npy and (optionally) images.

    Args:
        basedir: dataset root containing poses_bounds.npy and images/.
        factor: integer downsample factor (mutually exclusive with width/height).
        width, height: explicit target resolution (the other dimension is
            derived to keep aspect ratio).
        load_imgs: if False, return only (poses, bds).
    Returns:
        (poses, bds) or (poses, bds, imgs); returns None on missing image
        directory or image/pose count mismatch (original behavior kept).
    """
    poses_arr = np.load(os.path.join(basedir, 'poses_bounds.npy'))
    # Each row: 15 pose entries (3x5 matrix: rotation | translation | hwf) + 2 depth bounds.
    poses = poses_arr[:, :-2].reshape([-1, 3, 5]).transpose([1, 2, 0])
    bds = poses_arr[:, -2:].transpose([1, 0])
    img0 = [os.path.join(basedir, 'images', f) for f in sorted(os.listdir(os.path.join(basedir, 'images'))) \
            if f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')][0]
    sh = imageio.imread(img0).shape
    sfx = ''
    # Ensure the requested downsampled image directory exists (created on demand).
    if factor is not None:
        sfx = '_{}'.format(factor)
        _minify(basedir, factors=[factor])
        factor = factor
    elif height is not None:
        factor = sh[0] / float(height)
        width = int(sh[1] / factor)
        _minify(basedir, resolutions=[[height, width]])
        sfx = '_{}x{}'.format(width, height)
    elif width is not None:
        factor = sh[1] / float(width)
        height = int(sh[0] / factor)
        _minify(basedir, resolutions=[[height, width]])
        sfx = '_{}x{}'.format(width, height)
    else:
        factor = 1
    imgdir = os.path.join(basedir, 'images' + sfx)
    if not os.path.exists(imgdir):
        print(imgdir, 'does not exist, returning')
        return
    imgfiles = [os.path.join(imgdir, f) for f in sorted(os.listdir(imgdir)) if
                f.endswith('JPG') or f.endswith('jpg') or f.endswith('png')]
    if poses.shape[-1] != len(imgfiles):
        print('Mismatch between imgs {} and poses {} !!!!'.format(len(imgfiles), poses.shape[-1]))
        return
    # Update the stored image size (hwf column) for the loaded resolution.
    sh = imageio.imread(imgfiles[0]).shape
    poses[:2, 4, :] = np.array(sh[:2]).reshape([2, 1])
    poses[2, 4, :] = poses[2, 4, :] * 1. / factor
    if not load_imgs:
        return poses, bds
    def imread(f):
        # ignoregamma avoids gamma-correction differences for PNGs.
        if f.endswith('png'):
            return imageio.imread(f, ignoregamma=True)
        else:
            return imageio.imread(f)
    # Fixed: original had a redundant duplicated assignment 'imgs = imgs = [...]'.
    imgs = [imread(f)[..., :3] / 255. for f in imgfiles]
    imgs = np.stack(imgs, -1)
    # print('Loaded image data', imgs.shape, poses[:, -1, 0])
    return poses, bds, imgs
def normalize(x):
    """Return x scaled to unit length (L2 norm)."""
    length = np.linalg.norm(x)
    return x / length
def viewmatrix(z, up, pos):
    """Build a 3x4 camera-to-world matrix from a forward axis, an up hint
    and a position; columns are [right, up, forward, position].

    The up hint is re-orthogonalized against the forward axis.
    """
    forward = z / np.linalg.norm(z)
    right = np.cross(up, forward)
    right = right / np.linalg.norm(right)
    true_up = np.cross(forward, right)
    true_up = true_up / np.linalg.norm(true_up)
    return np.stack([right, true_up, forward, pos], 1)
def ptstocam(pts, c2w):
    """Transform world-space points into the camera frame of c2w
    (subtract the camera center, then apply the rotation transpose)."""
    offset = pts - c2w[:3, 3]
    return np.matmul(c2w[:3, :3].T, offset[..., np.newaxis])[..., 0]
def poses_avg(poses):
    """Average a stack of [N, 3, 5] poses into a single 3x5 pose.

    The translation is the mean camera center; the orientation comes from the
    summed forward (z) and up (y) axes; the hwf column of the first pose is
    carried through unchanged.
    """
    hwf = poses[0, :3, -1:]
    center = poses[:, :3, 3].mean(0)
    forward = normalize(poses[:, :3, 2].sum(0))
    up_hint = poses[:, :3, 1].sum(0)
    return np.concatenate([viewmatrix(forward, up_hint, center), hwf], 1)
def recenter_poses(poses):
    """Re-express all poses relative to their average pose, so the average
    camera becomes the identity. The hwf column is left untouched."""
    recentered = poses + 0  # cheap copy
    bottom_row = np.reshape([0, 0, 0, 1.], [1, 4])
    avg = poses_avg(poses)
    avg = np.concatenate([avg[:3, :4], bottom_row], -2)
    tiled_bottom = np.tile(np.reshape(bottom_row, [1, 1, 4]), [poses.shape[0], 1, 1])
    homogeneous = np.concatenate([poses[:, :3, :4], tiled_bottom], -2)
    # Left-multiply by the inverse of the average pose.
    transformed = np.linalg.inv(avg) @ homogeneous
    recentered[:, :3, :4] = transformed[:, :3, :4]
    return recentered
#####################
def spherify_poses(poses, bds):
    """Recenter and rescale poses for 360-degree ("spherified") captures.

    Finds the 3D point minimizing distance to all camera optical axes, makes
    it the new origin, and rescales so cameras lie near the unit sphere.
    Returns (poses_reset, bds), both rescaled consistently.
    """
    # Promote a [N, 3, 4] pose stack to homogeneous [N, 4, 4].
    p34_to_44 = lambda p: np.concatenate([p, np.tile(np.reshape(np.eye(4)[-1, :], [1, 1, 4]), [p.shape[0], 1, 1])], 1)
    rays_d = poses[:, :3, 2:3]
    rays_o = poses[:, :3, 3:4]
    def min_line_dist(rays_o, rays_d):
        # Least-squares point closest to all camera viewing rays.
        A_i = np.eye(3) - rays_d * np.transpose(rays_d, [0, 2, 1])
        b_i = -A_i @ rays_o
        pt_mindist = np.squeeze(-np.linalg.inv((np.transpose(A_i, [0, 2, 1]) @ A_i).mean(0)) @ (b_i).mean(0))
        return pt_mindist
    pt_mindist = min_line_dist(rays_o, rays_d)
    center = pt_mindist
    up = (poses[:, :3, 3] - center).mean(0)
    # Build an orthonormal world frame around the scene center.
    vec0 = normalize(up)
    vec1 = normalize(np.cross([.1, .2, .3], vec0))
    vec2 = normalize(np.cross(vec0, vec1))
    pos = center
    c2w = np.stack([vec1, vec2, vec0, pos], 1)
    poses_reset = np.linalg.inv(p34_to_44(c2w[None])) @ p34_to_44(poses[:, :3, :4])
    # Scale so the RMS camera distance from the origin is 1.
    rad = np.sqrt(np.mean(np.sum(np.square(poses_reset[:, :3, 3]), -1)))
    sc = 1. / rad
    poses_reset[:, :3, 3] *= sc
    bds *= sc
    rad *= sc
    centroid = np.mean(poses_reset[:, :3, 3], 0)
    zh = centroid[2]  # NOTE(review): computed but unused in this function
    # Re-attach the original hwf column to the reset poses.
    poses_reset = np.concatenate(
        [poses_reset[:, :3, :4], np.broadcast_to(poses[0, :3, -1:], poses_reset[:, :3, -1:].shape)], -1)
    return poses_reset, bds
def load_llff_data(data_dir, model_name, obs_img_num, *kwargs, factor=8, recenter=True, bd_factor=.75, spherify=False):
    """Load one observation (image + pose) from an LLFF dataset.

    NOTE(review): despite the name, *kwargs collects six POSITIONAL extras
    (phi, theta, psi, x, y, z) used to perturb the start pose — confirm
    callers always pass exactly six.

    Returns:
        (obs_img uint8, hwf, start_pose 4x4, obs_img_pose 4x4, bds)
    """
    poses, bds, imgs = _load_data(data_dir + "/", factor=factor) # factor=8 downsamples original imgs by 8x
    # print('Loaded', data_dir + str(model_name) + "/", bds.min(), bds.max())
    # Correct rotation matrix ordering and move variable dim to axis 0
    poses = np.concatenate([poses[:, 1:2, :], -poses[:, 0:1, :], poses[:, 2:, :]], 1)
    poses = np.moveaxis(poses, -1, 0).astype(np.float32)
    images = np.moveaxis(imgs, -1, 0).astype(np.float32)
    bds = np.moveaxis(bds, -1, 0).astype(np.float32)
    # Rescale if bd_factor is provided
    sc = 1. if bd_factor is None else 1. / (bds.min() * bd_factor)
    poses[:, :3, 3] *= sc
    bds *= sc
    if recenter:
        poses = recenter_poses(poses)
    if spherify:
        poses, bds = spherify_poses(poses, bds)
    print("loading image number: ", obs_img_num)
    #images = images.astype(np.float32)
    images = np.asarray(images * 255, dtype=np.uint8)
    poses = poses.astype(np.float32)
    # Split off the intrinsics column from the pose matrices.
    hwf = poses[0,:3,-1]
    poses = poses[:,:3,:4]
    obs_img = images[obs_img_num]
    # Promote the selected pose to homogeneous 4x4.
    obs_img_pose = np.concatenate((poses[obs_img_num], np.array([[0,0,0,1.]])), axis=0)
    phi, theta, psi, x, y, z = kwargs
    # Perturbed starting pose for the localization experiment.
    start_pose = rot_phi(phi/180.*np.pi) @ rot_theta(theta/180.*np.pi) @ rot_psi(psi/180.*np.pi) @ trans_t(x, y, z) @ obs_img_pose
    return obs_img, hwf, start_pose, obs_img_pose, bds
================================================
FILE: tools/eval_logs.py
================================================
import matplotlib.pyplot as plt
import numpy as np
import glob
import gtsam
import sys
######################## user params ########################
# Whether to (re-)run iNeRF to regenerate its logs before plotting.
run_inerf = False
# only needed if you run iNeRF
sys.path.insert(0, '/home/dominic')
sys.path.insert(0, '/home/dominic/inerf') # path to iNeRF root directory
# NOTE(review): this import executes unconditionally, so iNeRF must be on the
# path even when run_inerf is False — confirm that is intended.
from inerf.run import run
# font size to use for plots
fontsize = 17
# plot mean error or plot ratio of trials with position error < 5 cm and rotation error < 5 degrees
get_mean_error = False
# directory where logs are stored
log_directory = '/home/dominic/mocNeRF_ws/src/nav/logs/inerf_compare'
num_starts_per_dataset = 5 # number of trials per dataset
datasets = ['fern', 'horns', 'fortress', 'room']
# datasets = ['fern']
# input number of iterations runs by iNeRF and M
# note this is used for plotting number of forward passes and not for telling iNeRF what params to use
params = {'inerf': {'num_iterations': 300, 'batch_size': 2048, 'course_samples':64, 'fine_samples':64}}
##############################################################
def check_if_position_error_good(gt_pose, est_pose, get_mean_error=False):
    """Translation error between two poses (objects exposing translation()).

    Returns the raw error in meters when get_mean_error is set, otherwise a
    bool indicating whether the error is under the 5 cm threshold.
    """
    acceptable_error = 0.05  # meters
    error = np.linalg.norm(gt_pose.translation() - est_pose.translation())
    return error if get_mean_error else error < acceptable_error
def check_if_rotation_error_good(gt_pose, est_pose, get_mean_error=False):
    """Geodesic rotation error (degrees) between two poses.

    Uses the trace formula angle = arccos((tr(R_ab) - 1) / 2). The cosine is
    clipped to [-1, 1] so floating-point round-off cannot push it out of
    arccos's domain and produce NaN (resolves the original TODO about NaN).

    Returns the raw error in degrees when get_mean_error is set, otherwise a
    bool indicating whether the error is under the 5 degree threshold.
    """
    acceptable_error = 5.0  # degrees
    est_t = np.transpose(est_pose.rotation().matrix())
    r_ab = est_t @ gt_pose.rotation().matrix()
    cos_angle = np.clip((np.trace(r_ab) - 1) / 2, -1.0, 1.0)
    rot_error = np.rad2deg(np.arccos(cos_angle))
    if get_mean_error:
        return rot_error
    return abs(rot_error) < acceptable_error
def average_arrays(axis_list, million_scale=True):
    """Average several (x, y) curves of different lengths onto a common grid.

    axis_list is [list_of_x_arrays, list_of_y_arrays]. Each curve is resampled
    onto 100 evenly spaced points within its own x-range, then x and y values
    are averaged pointwise across curves. x is divided by 1e6 when
    million_scale is set (for "millions of forward passes" axes).
    Adapted from https://stackoverflow.com/questions/49037902/how-to-interpolate-a-line-between-two-other-lines-in-python/49041142#49041142
    """
    xs, ys = axis_list[0], axis_list[1]
    resampled_xs = [np.linspace(min(x), max(x), 100) for x in xs]
    resampled_ys = [np.interp(resampled_xs[i], xs[i], ys[i]) for i in range(len(xs))]
    scale = 1000000.0 if million_scale else 1.0
    n_curves = len(xs)
    midx = [np.mean([resampled_xs[c][i] for c in range(n_curves)]) / scale for i in range(100)]
    midy = [np.mean([resampled_ys[c][i] for c in range(n_curves)]) for i in range(100)]
    return midx, midy
def get_ratio(alg, get_mean_error=False, prefix=""):
    """Collect accuracy-vs-compute curves for one algorithm over all datasets.

    Args:
        alg: 'mocnerf' or 'inerf' — selects which log files to read.
        get_mean_error: if True, raw errors are accumulated instead of
            success booleans.
        prefix: optional log-file name infix for ablation runs (e.g. "started").
    Returns:
        (forward_passes_count, average_position_metric,
         average_rotation_metric, update_steps_count);
        update_steps_count is None for iNeRF.
    """
    if prefix != "":
        prefix = prefix + "_"
    if alg == 'inerf':
        # iNeRF does a fixed number of forward passes per gradient iteration.
        # Bug fix: these keys were misspelled as 'course_samplese' /
        # 'fine_samplese', which raised KeyError at runtime (params defines
        # 'course_samples' / 'fine_samples').
        num_forward_passes_per_iteration = (params[alg]['course_samples'] + params[alg]['fine_samples']) * params[alg]['batch_size']
        forward_passes_count = [i * num_forward_passes_per_iteration / 1000000.0 for i in range(params[alg]['num_iterations'] + 1)]
    total_num_forward_passes_per_iteration = []
    total_position_error_good = []
    total_rotation_error_good = []
    for dataset_index, dataset_name in enumerate(datasets):
        gt_files = glob.glob(log_directory + '/gt_' + dataset_name + '*')
        for start_num, gt_file in enumerate(gt_files):
            # Log files are named gt_<dataset>_<imgnum>_poses.npy.
            img_num = gt_file.split('/')[-1].split('_')[2]
            gt_pose = gtsam.Pose3(np.load(gt_file))
            # print(gt_pose)
            if run_inerf and alg == 'inerf':
                # delta_phi, delta_theta, delta_psi, delta_x, delta_y, delta_z, obs_img_num, model_name
                pertubation_file = log_directory + "/initial_pose_" + dataset_name + "_" + str(img_num) + "_" + "poses.npy"
                pertubation_pose = np.load(pertubation_file)
                experiment_params = [pertubation_pose[0] * 180.0/np.pi, pertubation_pose[1] * 180.0/np.pi, pertubation_pose[2] * 180.0/np.pi,
                                     pertubation_pose[3], pertubation_pose[4], pertubation_pose[5],
                                     int(img_num), dataset_name]
                # print(experiment_params)
                run(experiment_params, log_directory=log_directory, log_results=True)
            est_file = log_directory + "/" + alg + "_" + prefix + dataset_name + "_" + str(img_num) + "_" + "poses.npy"
            pose_estimates = np.load(est_file)
            if alg == 'mocnerf':
                # Loc-NeRF logs an explicit per-update forward-pass count.
                iteration_file = log_directory + "/" + alg + "_" + prefix + dataset_name + "_" + str(img_num) + "_" + "forward_passes.npy"
                num_forward_passes_per_iteration = np.load(iteration_file)
            position_error_good = []
            rotation_error_good = []
            for pose_index in range(pose_estimates.shape[0]):
                est_pose_matrix = gtsam.Pose3(pose_estimates[pose_index])
                position_error_good.append(check_if_position_error_good(gt_pose, est_pose_matrix, get_mean_error))
                rotation_error_good.append(check_if_rotation_error_good(gt_pose, est_pose_matrix, get_mean_error))
            total_num_forward_passes_per_iteration.append(num_forward_passes_per_iteration)
            total_position_error_good.append(position_error_good)
            total_rotation_error_good.append(rotation_error_good)
    if alg == 'inerf':
        # Fixed iteration count per trial: plain elementwise averaging works.
        average_position_metric = np.average(np.array(total_position_error_good), axis=0)
        average_rotation_metric = np.average(np.array(total_rotation_error_good), axis=0)
        update_steps_count = None
    else:
        # Variable-length trials: resample onto a common grid before averaging.
        total_update_steps_per_iteration = [ [i for i in range(len(update_in_trial))] for update_in_trial in total_num_forward_passes_per_iteration]
        forward_passes_count, average_position_metric = average_arrays([total_num_forward_passes_per_iteration, total_position_error_good])
        forward_passes_count, average_rotation_metric = average_arrays([total_num_forward_passes_per_iteration, total_rotation_error_good])
        update_steps_count, average_position_metric = average_arrays([total_update_steps_per_iteration, total_position_error_good], million_scale=False)
    return forward_passes_count, average_position_metric, average_rotation_metric, update_steps_count
# Build curves for each method; the trailing '_' discards update_steps_count.
inerf_forward_passes_count, inerf_average_position_metric, inerf_average_rotation_metric, _ = get_ratio('inerf', get_mean_error)
mcl_forward_passes_count, mcl_average_position_metric, mcl_average_rotation_metric, _ = get_ratio('mocnerf', get_mean_error)
# Note the prefix argument can be used if you have multiple logs with different params from Loc-NeRF.
# For example, in this case we wanted to do an ablation study on running with and without annealing.
mcl_forward_passes_count_norefine, mcl_average_position_metric_norefine, mcl_average_rotation_metric_norefine, _ = get_ratio('mocnerf', get_mean_error, prefix="started")
# Figure 1: translation accuracy (or mean error) vs forward passes.
position_plot = plt.figure(1, figsize=(8,4.5))
plt.plot(mcl_forward_passes_count, mcl_average_position_metric, label="Loc-NeRF w/ annealing", linewidth=2.5, color='b')
plt.plot(mcl_forward_passes_count_norefine, mcl_average_position_metric_norefine, label="Loc-NeRF w/o annealing", linewidth=2.5, color='k')
plt.plot(inerf_forward_passes_count[0:-1], inerf_average_position_metric[0:-1], label="iNeRF", linewidth=2.5, color='orange')
plt.xlabel("Number of Forward Passes (in millions)", fontsize=fontsize, family='Arial')
plt.grid()
plt.legend(loc='upper left', fontsize=fontsize-4)
if not get_mean_error:
    plt.ylim([0, 1.01])
    plt.title("Translation Accuracy Evaluation", fontsize=fontsize, family='Arial')
    plt.ylabel("Ratio of Trials with Translation \n Error < 5 cm", fontsize=fontsize, family='Arial')
else:
    plt.title("Average Translation Error for 20 Trials", fontsize=fontsize, family='Arial')
    plt.ylabel("error (m)", fontsize=fontsize, family='Arial')
plt.tight_layout()
plt.show()
# Figure 2: rotation accuracy (or mean error) vs forward passes.
rotation_plot = plt.figure(2, figsize=(8,4.5))
plt.plot(mcl_forward_passes_count, mcl_average_rotation_metric, label="Loc-NeRF w/ annealing", linewidth=2.5, color='b')
plt.plot(mcl_forward_passes_count_norefine, mcl_average_rotation_metric_norefine, label="Loc-NeRF w/o annealing", linewidth=2.5, color='k')
plt.plot(inerf_forward_passes_count[0:-1], inerf_average_rotation_metric[0:-1], label="iNeRF", linewidth=2.5, color='orange')
plt.xlabel("Number of Forward Passes (in millions)", fontsize=fontsize, family='Arial')
plt.grid()
plt.legend(loc='upper right', fontsize=fontsize-4)
if not get_mean_error:
    plt.ylim([0, 1.01])
    plt.title("Rotation Accuracy Evaluation", fontsize=fontsize, family='Arial')
    plt.ylabel("Ratio of Trials with Rotation Error < 5$^\circ$", fontsize=fontsize, family='Arial')
else:
    plt.title("Average Rotation Error for 20 Trials", fontsize=fontsize, family='Arial')
    plt.ylabel("error (deg)", fontsize=fontsize, family='Arial')
plt.tight_layout()
plt.show()
# To run iNeRF with this code use this (this makes sure that iNeRF params are set correctly for all runs):
# python3 eval_logs.py --config /home/dominic/inerf/configs/fern.txt --data_dir /home/dominic/inerf/data/nerf_llff_data/ --ckpt_dir /home/dominic/inerf/ckpts/ --N_importance 64 --batch_size 2048
================================================
FILE: tools/ros_to_jpg.py
================================================
# Export images from a ROS bag topic to numbered JPEG files, keeping only
# every `downsample`-th frame.
import rosbag
import rospy
import numpy as np
import csv
import os
import cv2
from cv_bridge import CvBridge

######## user params ##########
# Image topic to extract from the bag.
TOPIC_NAME = "/camera/color/image_raw"
# Bag file to read (replace <rosbag.bag> with the actual filename).
data_bag = rosbag.Bag("/home/dominic/<rosbag.bag>")
# Output images are written to SAVE_LOC/DATA_NAME/r<index>.jpg.
SAVE_LOC = "/home/dominic"
DATA_NAME = "nerf_hallway"
# Save one frame out of every `downsample` messages.
downsample = 10
###############################

out_dir = os.path.join(SAVE_LOC, DATA_NAME)
# exist_ok=True so re-running the script does not crash on an existing dir.
os.makedirs(out_dir, exist_ok=True)

# Construct the bridge once instead of once per saved frame.
bridge = CvBridge()

count = 0   # messages seen on TOPIC_NAME
index = 0   # images written so far
for topic, msg, t in data_bag.read_messages(topics=[TOPIC_NAME]):
    if topic == TOPIC_NAME:
        if count % downsample == 0:
            cv_image = bridge.imgmsg_to_cv2(msg)
            # astype() returns a new array; the original discarded this
            # result, so the uint8 conversion never took effect.
            cv_image = cv_image.astype(np.uint8)
            # Swap channel order so the saved JPEG has the expected colors.
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
            cv2.imwrite(os.path.join(out_dir, "r" + str(index) + ".jpg"), cv_image)
            index += 1
        count += 1

data_bag.close()
gitextract_e4tgfkjc/
├── CMakeLists.txt
├── LICENSE
├── README.md
├── cfg/
│ ├── jackal.yaml
│ ├── llff.yaml
│ └── llff_global.yaml
├── launch/
│ └── navigate.launch
├── logs/
│ ├── backups/
│ │ ├── global_loc/
│ │ │ ├── gt_fern_15_poses.npy
│ │ │ ├── gt_fern_18_poses.npy
│ │ │ ├── gt_fern_2_poses.npy
│ │ │ ├── gt_fern_3_poses.npy
│ │ │ ├── gt_fern_7_poses.npy
│ │ │ ├── gt_fortress_11_poses.npy
│ │ │ ├── gt_fortress_16_poses.npy
│ │ │ ├── gt_fortress_19_poses.npy
│ │ │ ├── gt_fortress_1_poses.npy
│ │ │ ├── gt_fortress_9_poses.npy
│ │ │ ├── gt_horns_13_poses.npy
│ │ │ ├── gt_horns_1_poses.npy
│ │ │ ├── gt_horns_5_poses.npy
│ │ │ ├── gt_horns_6_poses.npy
│ │ │ ├── gt_horns_8_poses.npy
│ │ │ ├── gt_room_14_poses.npy
│ │ │ ├── gt_room_17_poses.npy
│ │ │ ├── gt_room_18_poses.npy
│ │ │ ├── gt_room_3_poses.npy
│ │ │ ├── gt_room_6_poses.npy
│ │ │ ├── initial_pose_fern_15_poses.npy
│ │ │ ├── initial_pose_fern_18_poses.npy
│ │ │ ├── initial_pose_fern_2_poses.npy
│ │ │ ├── initial_pose_fern_3_poses.npy
│ │ │ ├── initial_pose_fern_7_poses.npy
│ │ │ ├── initial_pose_fortress_11_poses.npy
│ │ │ ├── initial_pose_fortress_16_poses.npy
│ │ │ ├── initial_pose_fortress_19_poses.npy
│ │ │ ├── initial_pose_fortress_1_poses.npy
│ │ │ ├── initial_pose_fortress_9_poses.npy
│ │ │ ├── initial_pose_horns_13_poses.npy
│ │ │ ├── initial_pose_horns_1_poses.npy
│ │ │ ├── initial_pose_horns_5_poses.npy
│ │ │ ├── initial_pose_horns_6_poses.npy
│ │ │ ├── initial_pose_horns_8_poses.npy
│ │ │ ├── initial_pose_room_14_poses.npy
│ │ │ ├── initial_pose_room_17_poses.npy
│ │ │ ├── initial_pose_room_18_poses.npy
│ │ │ ├── initial_pose_room_3_poses.npy
│ │ │ ├── initial_pose_room_6_poses.npy
│ │ │ ├── mocnerf_fern_15_forward_passes.npy
│ │ │ ├── mocnerf_fern_15_poses.npy
│ │ │ ├── mocnerf_fern_18_forward_passes.npy
│ │ │ ├── mocnerf_fern_18_poses.npy
│ │ │ ├── mocnerf_fern_2_forward_passes.npy
│ │ │ ├── mocnerf_fern_2_poses.npy
│ │ │ ├── mocnerf_fern_3_forward_passes.npy
│ │ │ ├── mocnerf_fern_3_poses.npy
│ │ │ ├── mocnerf_fern_7_forward_passes.npy
│ │ │ ├── mocnerf_fern_7_poses.npy
│ │ │ ├── mocnerf_fortress_11_forward_passes.npy
│ │ │ ├── mocnerf_fortress_11_poses.npy
│ │ │ ├── mocnerf_fortress_16_forward_passes.npy
│ │ │ ├── mocnerf_fortress_16_poses.npy
│ │ │ ├── mocnerf_fortress_19_forward_passes.npy
│ │ │ ├── mocnerf_fortress_19_poses.npy
│ │ │ ├── mocnerf_fortress_1_forward_passes.npy
│ │ │ ├── mocnerf_fortress_1_poses.npy
│ │ │ ├── mocnerf_fortress_9_forward_passes.npy
│ │ │ ├── mocnerf_fortress_9_poses.npy
│ │ │ ├── mocnerf_horns_13_forward_passes.npy
│ │ │ ├── mocnerf_horns_13_poses.npy
│ │ │ ├── mocnerf_horns_1_forward_passes.npy
│ │ │ ├── mocnerf_horns_1_poses.npy
│ │ │ ├── mocnerf_horns_5_forward_passes.npy
│ │ │ ├── mocnerf_horns_5_poses.npy
│ │ │ ├── mocnerf_horns_6_forward_passes.npy
│ │ │ ├── mocnerf_horns_6_poses.npy
│ │ │ ├── mocnerf_horns_8_forward_passes.npy
│ │ │ ├── mocnerf_horns_8_poses.npy
│ │ │ ├── mocnerf_room_14_forward_passes.npy
│ │ │ ├── mocnerf_room_14_poses.npy
│ │ │ ├── mocnerf_room_17_forward_passes.npy
│ │ │ ├── mocnerf_room_17_poses.npy
│ │ │ ├── mocnerf_room_18_forward_passes.npy
│ │ │ ├── mocnerf_room_18_poses.npy
│ │ │ ├── mocnerf_room_3_forward_passes.npy
│ │ │ ├── mocnerf_room_3_poses.npy
│ │ │ ├── mocnerf_room_6_forward_passes.npy
│ │ │ ├── mocnerf_room_6_poses.npy
│ │ │ ├── mocnerf_started_fern_15_forward_passes.npy
│ │ │ ├── mocnerf_started_fern_15_poses.npy
│ │ │ ├── mocnerf_started_fern_18_forward_passes.npy
│ │ │ ├── mocnerf_started_fern_18_poses.npy
│ │ │ ├── mocnerf_started_fern_2_forward_passes.npy
│ │ │ ├── mocnerf_started_fern_2_poses.npy
│ │ │ ├── mocnerf_started_fern_3_forward_passes.npy
│ │ │ ├── mocnerf_started_fern_3_poses.npy
│ │ │ ├── mocnerf_started_fern_7_forward_passes.npy
│ │ │ ├── mocnerf_started_fern_7_poses.npy
│ │ │ ├── mocnerf_started_fortress_11_forward_passes.npy
│ │ │ ├── mocnerf_started_fortress_11_poses.npy
│ │ │ ├── mocnerf_started_fortress_16_forward_passes.npy
│ │ │ ├── mocnerf_started_fortress_16_poses.npy
│ │ │ ├── mocnerf_started_fortress_19_forward_passes.npy
│ │ │ ├── mocnerf_started_fortress_19_poses.npy
│ │ │ ├── mocnerf_started_fortress_1_forward_passes.npy
│ │ │ ├── mocnerf_started_fortress_1_poses.npy
│ │ │ ├── mocnerf_started_fortress_9_forward_passes.npy
│ │ │ ├── mocnerf_started_fortress_9_poses.npy
│ │ │ ├── mocnerf_started_horns_13_forward_passes.npy
│ │ │ ├── mocnerf_started_horns_13_poses.npy
│ │ │ ├── mocnerf_started_horns_1_forward_passes.npy
│ │ │ ├── mocnerf_started_horns_1_poses.npy
│ │ │ ├── mocnerf_started_horns_5_forward_passes.npy
│ │ │ ├── mocnerf_started_horns_5_poses.npy
│ │ │ ├── mocnerf_started_horns_6_forward_passes.npy
│ │ │ ├── mocnerf_started_horns_6_poses.npy
│ │ │ ├── mocnerf_started_horns_8_forward_passes.npy
│ │ │ ├── mocnerf_started_horns_8_poses.npy
│ │ │ ├── mocnerf_started_room_14_forward_passes.npy
│ │ │ ├── mocnerf_started_room_14_poses.npy
│ │ │ ├── mocnerf_started_room_17_forward_passes.npy
│ │ │ ├── mocnerf_started_room_17_poses.npy
│ │ │ ├── mocnerf_started_room_18_forward_passes.npy
│ │ │ ├── mocnerf_started_room_18_poses.npy
│ │ │ ├── mocnerf_started_room_3_forward_passes.npy
│ │ │ ├── mocnerf_started_room_3_poses.npy
│ │ │ ├── mocnerf_started_room_6_forward_passes.npy
│ │ │ └── mocnerf_started_room_6_poses.npy
│ │ └── inerf_compare/
│ │ ├── gt_fern_10_poses.npy
│ │ ├── gt_fern_12_poses.npy
│ │ ├── gt_fern_13_poses.npy
│ │ ├── gt_fern_18_poses.npy
│ │ ├── gt_fern_7_poses.npy
│ │ ├── gt_fortress_12_poses.npy
│ │ ├── gt_fortress_15_poses.npy
│ │ ├── gt_fortress_1_poses.npy
│ │ ├── gt_fortress_3_poses.npy
│ │ ├── gt_fortress_5_poses.npy
│ │ ├── gt_horns_0_poses.npy
│ │ ├── gt_horns_16_poses.npy
│ │ ├── gt_horns_17_poses.npy
│ │ ├── gt_horns_2_poses.npy
│ │ ├── gt_horns_7_poses.npy
│ │ ├── gt_room_16_poses.npy
│ │ ├── gt_room_3_poses.npy
│ │ ├── gt_room_6_poses.npy
│ │ ├── gt_room_7_poses.npy
│ │ ├── gt_room_8_poses.npy
│ │ ├── inerf_fern_10_poses.npy
│ │ ├── inerf_fern_12_poses.npy
│ │ ├── inerf_fern_13_poses.npy
│ │ ├── inerf_fern_18_poses.npy
│ │ ├── inerf_fern_7_poses.npy
│ │ ├── inerf_fortress_12_poses.npy
│ │ ├── inerf_fortress_15_poses.npy
│ │ ├── inerf_fortress_1_poses.npy
│ │ ├── inerf_fortress_3_poses.npy
│ │ ├── inerf_fortress_5_poses.npy
│ │ ├── inerf_horns_0_poses.npy
│ │ ├── inerf_horns_16_poses.npy
│ │ ├── inerf_horns_17_poses.npy
│ │ ├── inerf_horns_2_poses.npy
│ │ ├── inerf_horns_7_poses.npy
│ │ ├── inerf_room_16_poses.npy
│ │ ├── inerf_room_3_poses.npy
│ │ ├── inerf_room_6_poses.npy
│ │ ├── inerf_room_7_poses.npy
│ │ ├── inerf_room_8_poses.npy
│ │ ├── initial_pose_fern_10_poses.npy
│ │ ├── initial_pose_fern_12_poses.npy
│ │ ├── initial_pose_fern_13_poses.npy
│ │ ├── initial_pose_fern_18_poses.npy
│ │ ├── initial_pose_fern_7_poses.npy
│ │ ├── initial_pose_fortress_12_poses.npy
│ │ ├── initial_pose_fortress_15_poses.npy
│ │ ├── initial_pose_fortress_1_poses.npy
│ │ ├── initial_pose_fortress_3_poses.npy
│ │ ├── initial_pose_fortress_5_poses.npy
│ │ ├── initial_pose_horns_0_poses.npy
│ │ ├── initial_pose_horns_16_poses.npy
│ │ ├── initial_pose_horns_17_poses.npy
│ │ ├── initial_pose_horns_2_poses.npy
│ │ ├── initial_pose_horns_7_poses.npy
│ │ ├── initial_pose_room_16_poses.npy
│ │ ├── initial_pose_room_3_poses.npy
│ │ ├── initial_pose_room_6_poses.npy
│ │ ├── initial_pose_room_7_poses.npy
│ │ ├── initial_pose_room_8_poses.npy
│ │ ├── mocnerf_fern_10_forward_passes.npy
│ │ ├── mocnerf_fern_10_poses.npy
│ │ ├── mocnerf_fern_12_forward_passes.npy
│ │ ├── mocnerf_fern_12_poses.npy
│ │ ├── mocnerf_fern_13_forward_passes.npy
│ │ ├── mocnerf_fern_13_poses.npy
│ │ ├── mocnerf_fern_18_forward_passes.npy
│ │ ├── mocnerf_fern_18_poses.npy
│ │ ├── mocnerf_fern_7_forward_passes.npy
│ │ ├── mocnerf_fern_7_poses.npy
│ │ ├── mocnerf_fortress_12_forward_passes.npy
│ │ ├── mocnerf_fortress_12_poses.npy
│ │ ├── mocnerf_fortress_15_forward_passes.npy
│ │ ├── mocnerf_fortress_15_poses.npy
│ │ ├── mocnerf_fortress_1_forward_passes.npy
│ │ ├── mocnerf_fortress_1_poses.npy
│ │ ├── mocnerf_fortress_3_forward_passes.npy
│ │ ├── mocnerf_fortress_3_poses.npy
│ │ ├── mocnerf_fortress_5_forward_passes.npy
│ │ ├── mocnerf_fortress_5_poses.npy
│ │ ├── mocnerf_horns_0_forward_passes.npy
│ │ ├── mocnerf_horns_0_poses.npy
│ │ ├── mocnerf_horns_16_forward_passes.npy
│ │ ├── mocnerf_horns_16_poses.npy
│ │ ├── mocnerf_horns_17_forward_passes.npy
│ │ ├── mocnerf_horns_17_poses.npy
│ │ ├── mocnerf_horns_2_forward_passes.npy
│ │ ├── mocnerf_horns_2_poses.npy
│ │ ├── mocnerf_horns_7_forward_passes.npy
│ │ ├── mocnerf_horns_7_poses.npy
│ │ ├── mocnerf_room_16_forward_passes.npy
│ │ ├── mocnerf_room_16_poses.npy
│ │ ├── mocnerf_room_3_forward_passes.npy
│ │ ├── mocnerf_room_3_poses.npy
│ │ ├── mocnerf_room_6_forward_passes.npy
│ │ ├── mocnerf_room_6_poses.npy
│ │ ├── mocnerf_room_7_forward_passes.npy
│ │ ├── mocnerf_room_7_poses.npy
│ │ ├── mocnerf_room_8_forward_passes.npy
│ │ ├── mocnerf_room_8_poses.npy
│ │ ├── mocnerf_started_fern_10_forward_passes.npy
│ │ ├── mocnerf_started_fern_10_poses.npy
│ │ ├── mocnerf_started_fern_12_forward_passes.npy
│ │ ├── mocnerf_started_fern_12_poses.npy
│ │ ├── mocnerf_started_fern_13_forward_passes.npy
│ │ ├── mocnerf_started_fern_13_poses.npy
│ │ ├── mocnerf_started_fern_18_forward_passes.npy
│ │ ├── mocnerf_started_fern_18_poses.npy
│ │ ├── mocnerf_started_fern_7_forward_passes.npy
│ │ ├── mocnerf_started_fern_7_poses.npy
│ │ ├── mocnerf_started_fortress_12_forward_passes.npy
│ │ ├── mocnerf_started_fortress_12_poses.npy
│ │ ├── mocnerf_started_fortress_15_forward_passes.npy
│ │ ├── mocnerf_started_fortress_15_poses.npy
│ │ ├── mocnerf_started_fortress_1_forward_passes.npy
│ │ ├── mocnerf_started_fortress_1_poses.npy
│ │ ├── mocnerf_started_fortress_3_forward_passes.npy
│ │ ├── mocnerf_started_fortress_3_poses.npy
│ │ ├── mocnerf_started_fortress_5_forward_passes.npy
│ │ ├── mocnerf_started_fortress_5_poses.npy
│ │ ├── mocnerf_started_horns_0_forward_passes.npy
│ │ ├── mocnerf_started_horns_0_poses.npy
│ │ ├── mocnerf_started_horns_16_forward_passes.npy
│ │ ├── mocnerf_started_horns_16_poses.npy
│ │ ├── mocnerf_started_horns_17_forward_passes.npy
│ │ ├── mocnerf_started_horns_17_poses.npy
│ │ ├── mocnerf_started_horns_2_forward_passes.npy
│ │ ├── mocnerf_started_horns_2_poses.npy
│ │ ├── mocnerf_started_horns_7_forward_passes.npy
│ │ ├── mocnerf_started_horns_7_poses.npy
│ │ ├── mocnerf_started_room_16_forward_passes.npy
│ │ ├── mocnerf_started_room_16_poses.npy
│ │ ├── mocnerf_started_room_3_forward_passes.npy
│ │ ├── mocnerf_started_room_3_poses.npy
│ │ ├── mocnerf_started_room_6_forward_passes.npy
│ │ ├── mocnerf_started_room_6_poses.npy
│ │ ├── mocnerf_started_room_7_forward_passes.npy
│ │ ├── mocnerf_started_room_7_poses.npy
│ │ ├── mocnerf_started_room_8_forward_passes.npy
│ │ └── mocnerf_started_room_8_poses.npy
│ ├── global_loc/
│ │ ├── gt_fern_15_poses.npy
│ │ ├── gt_fern_18_poses.npy
│ │ ├── gt_fern_2_poses.npy
│ │ ├── gt_fern_3_poses.npy
│ │ ├── gt_fern_7_poses.npy
│ │ ├── gt_fortress_11_poses.npy
│ │ ├── gt_fortress_16_poses.npy
│ │ ├── gt_fortress_19_poses.npy
│ │ ├── gt_fortress_1_poses.npy
│ │ ├── gt_fortress_9_poses.npy
│ │ ├── gt_horns_13_poses.npy
│ │ ├── gt_horns_1_poses.npy
│ │ ├── gt_horns_5_poses.npy
│ │ ├── gt_horns_6_poses.npy
│ │ ├── gt_horns_8_poses.npy
│ │ ├── gt_room_14_poses.npy
│ │ ├── gt_room_17_poses.npy
│ │ ├── gt_room_18_poses.npy
│ │ ├── gt_room_3_poses.npy
│ │ ├── gt_room_6_poses.npy
│ │ ├── initial_pose_fern_15_poses.npy
│ │ ├── initial_pose_fern_18_poses.npy
│ │ ├── initial_pose_fern_2_poses.npy
│ │ ├── initial_pose_fern_3_poses.npy
│ │ ├── initial_pose_fern_7_poses.npy
│ │ ├── initial_pose_fortress_11_poses.npy
│ │ ├── initial_pose_fortress_16_poses.npy
│ │ ├── initial_pose_fortress_19_poses.npy
│ │ ├── initial_pose_fortress_1_poses.npy
│ │ ├── initial_pose_fortress_9_poses.npy
│ │ ├── initial_pose_horns_13_poses.npy
│ │ ├── initial_pose_horns_1_poses.npy
│ │ ├── initial_pose_horns_5_poses.npy
│ │ ├── initial_pose_horns_6_poses.npy
│ │ ├── initial_pose_horns_8_poses.npy
│ │ ├── initial_pose_room_14_poses.npy
│ │ ├── initial_pose_room_17_poses.npy
│ │ ├── initial_pose_room_18_poses.npy
│ │ ├── initial_pose_room_3_poses.npy
│ │ ├── initial_pose_room_6_poses.npy
│ │ ├── mocnerf_fern_15_forward_passes.npy
│ │ ├── mocnerf_fern_15_poses.npy
│ │ ├── mocnerf_fern_18_forward_passes.npy
│ │ ├── mocnerf_fern_18_poses.npy
│ │ ├── mocnerf_fern_2_forward_passes.npy
│ │ ├── mocnerf_fern_2_poses.npy
│ │ ├── mocnerf_fern_3_forward_passes.npy
│ │ ├── mocnerf_fern_3_poses.npy
│ │ ├── mocnerf_fern_7_forward_passes.npy
│ │ ├── mocnerf_fern_7_poses.npy
│ │ ├── mocnerf_fortress_11_forward_passes.npy
│ │ ├── mocnerf_fortress_11_poses.npy
│ │ ├── mocnerf_fortress_16_forward_passes.npy
│ │ ├── mocnerf_fortress_16_poses.npy
│ │ ├── mocnerf_fortress_19_forward_passes.npy
│ │ ├── mocnerf_fortress_19_poses.npy
│ │ ├── mocnerf_fortress_1_forward_passes.npy
│ │ ├── mocnerf_fortress_1_poses.npy
│ │ ├── mocnerf_fortress_9_forward_passes.npy
│ │ ├── mocnerf_fortress_9_poses.npy
│ │ ├── mocnerf_horns_13_forward_passes.npy
│ │ ├── mocnerf_horns_13_poses.npy
│ │ ├── mocnerf_horns_1_forward_passes.npy
│ │ ├── mocnerf_horns_1_poses.npy
│ │ ├── mocnerf_horns_5_forward_passes.npy
│ │ ├── mocnerf_horns_5_poses.npy
│ │ ├── mocnerf_horns_6_forward_passes.npy
│ │ ├── mocnerf_horns_6_poses.npy
│ │ ├── mocnerf_horns_8_forward_passes.npy
│ │ ├── mocnerf_horns_8_poses.npy
│ │ ├── mocnerf_room_14_forward_passes.npy
│ │ ├── mocnerf_room_14_poses.npy
│ │ ├── mocnerf_room_17_forward_passes.npy
│ │ ├── mocnerf_room_17_poses.npy
│ │ ├── mocnerf_room_18_forward_passes.npy
│ │ ├── mocnerf_room_18_poses.npy
│ │ ├── mocnerf_room_3_forward_passes.npy
│ │ ├── mocnerf_room_3_poses.npy
│ │ ├── mocnerf_room_6_forward_passes.npy
│ │ ├── mocnerf_room_6_poses.npy
│ │ ├── mocnerf_started_fern_15_forward_passes.npy
│ │ ├── mocnerf_started_fern_15_poses.npy
│ │ ├── mocnerf_started_fern_18_forward_passes.npy
│ │ ├── mocnerf_started_fern_18_poses.npy
│ │ ├── mocnerf_started_fern_2_forward_passes.npy
│ │ ├── mocnerf_started_fern_2_poses.npy
│ │ ├── mocnerf_started_fern_3_forward_passes.npy
│ │ ├── mocnerf_started_fern_3_poses.npy
│ │ ├── mocnerf_started_fern_7_forward_passes.npy
│ │ ├── mocnerf_started_fern_7_poses.npy
│ │ ├── mocnerf_started_fortress_11_forward_passes.npy
│ │ ├── mocnerf_started_fortress_11_poses.npy
│ │ ├── mocnerf_started_fortress_16_forward_passes.npy
│ │ ├── mocnerf_started_fortress_16_poses.npy
│ │ ├── mocnerf_started_fortress_19_forward_passes.npy
│ │ ├── mocnerf_started_fortress_19_poses.npy
│ │ ├── mocnerf_started_fortress_1_forward_passes.npy
│ │ ├── mocnerf_started_fortress_1_poses.npy
│ │ ├── mocnerf_started_fortress_9_forward_passes.npy
│ │ ├── mocnerf_started_fortress_9_poses.npy
│ │ ├── mocnerf_started_horns_13_forward_passes.npy
│ │ ├── mocnerf_started_horns_13_poses.npy
│ │ ├── mocnerf_started_horns_1_forward_passes.npy
│ │ ├── mocnerf_started_horns_1_poses.npy
│ │ ├── mocnerf_started_horns_5_forward_passes.npy
│ │ ├── mocnerf_started_horns_5_poses.npy
│ │ ├── mocnerf_started_horns_6_forward_passes.npy
│ │ ├── mocnerf_started_horns_6_poses.npy
│ │ ├── mocnerf_started_horns_8_forward_passes.npy
│ │ ├── mocnerf_started_horns_8_poses.npy
│ │ ├── mocnerf_started_room_14_forward_passes.npy
│ │ ├── mocnerf_started_room_14_poses.npy
│ │ ├── mocnerf_started_room_17_forward_passes.npy
│ │ ├── mocnerf_started_room_17_poses.npy
│ │ ├── mocnerf_started_room_18_forward_passes.npy
│ │ ├── mocnerf_started_room_18_poses.npy
│ │ ├── mocnerf_started_room_3_forward_passes.npy
│ │ ├── mocnerf_started_room_3_poses.npy
│ │ ├── mocnerf_started_room_6_forward_passes.npy
│ │ └── mocnerf_started_room_6_poses.npy
│ └── inerf_compare/
│ ├── gt_fern_10_poses.npy
│ ├── gt_fern_12_poses.npy
│ ├── gt_fern_13_poses.npy
│ ├── gt_fern_18_poses.npy
│ ├── gt_fern_7_poses.npy
│ ├── gt_fortress_12_poses.npy
│ ├── gt_fortress_15_poses.npy
│ ├── gt_fortress_1_poses.npy
│ ├── gt_fortress_3_poses.npy
│ ├── gt_fortress_5_poses.npy
│ ├── gt_horns_0_poses.npy
│ ├── gt_horns_16_poses.npy
│ ├── gt_horns_17_poses.npy
│ ├── gt_horns_2_poses.npy
│ ├── gt_horns_7_poses.npy
│ ├── gt_room_16_poses.npy
│ ├── gt_room_3_poses.npy
│ ├── gt_room_6_poses.npy
│ ├── gt_room_7_poses.npy
│ ├── gt_room_8_poses.npy
│ ├── inerf_fern_10_poses.npy
│ ├── inerf_fern_12_poses.npy
│ ├── inerf_fern_13_poses.npy
│ ├── inerf_fern_18_poses.npy
│ ├── inerf_fern_7_poses.npy
│ ├── inerf_fortress_12_poses.npy
│ ├── inerf_fortress_15_poses.npy
│ ├── inerf_fortress_1_poses.npy
│ ├── inerf_fortress_3_poses.npy
│ ├── inerf_fortress_5_poses.npy
│ ├── inerf_horns_0_poses.npy
│ ├── inerf_horns_16_poses.npy
│ ├── inerf_horns_17_poses.npy
│ ├── inerf_horns_2_poses.npy
│ ├── inerf_horns_7_poses.npy
│ ├── inerf_room_16_poses.npy
│ ├── inerf_room_3_poses.npy
│ ├── inerf_room_6_poses.npy
│ ├── inerf_room_7_poses.npy
│ ├── inerf_room_8_poses.npy
│ ├── initial_pose_fern_10_poses.npy
│ ├── initial_pose_fern_12_poses.npy
│ ├── initial_pose_fern_13_poses.npy
│ ├── initial_pose_fern_18_poses.npy
│ ├── initial_pose_fern_7_poses.npy
│ ├── initial_pose_fortress_12_poses.npy
│ ├── initial_pose_fortress_15_poses.npy
│ ├── initial_pose_fortress_1_poses.npy
│ ├── initial_pose_fortress_3_poses.npy
│ ├── initial_pose_fortress_5_poses.npy
│ ├── initial_pose_horns_0_poses.npy
│ ├── initial_pose_horns_16_poses.npy
│ ├── initial_pose_horns_17_poses.npy
│ ├── initial_pose_horns_2_poses.npy
│ ├── initial_pose_horns_7_poses.npy
│ ├── initial_pose_room_16_poses.npy
│ ├── initial_pose_room_3_poses.npy
│ ├── initial_pose_room_6_poses.npy
│ ├── initial_pose_room_7_poses.npy
│ ├── initial_pose_room_8_poses.npy
│ ├── mocnerf_fern_10_forward_passes.npy
│ ├── mocnerf_fern_10_poses.npy
│ ├── mocnerf_fern_12_forward_passes.npy
│ ├── mocnerf_fern_12_poses.npy
│ ├── mocnerf_fern_13_forward_passes.npy
│ ├── mocnerf_fern_13_poses.npy
│ ├── mocnerf_fern_18_forward_passes.npy
│ ├── mocnerf_fern_18_poses.npy
│ ├── mocnerf_fern_7_forward_passes.npy
│ ├── mocnerf_fern_7_poses.npy
│ ├── mocnerf_fortress_12_forward_passes.npy
│ ├── mocnerf_fortress_12_poses.npy
│ ├── mocnerf_fortress_15_forward_passes.npy
│ ├── mocnerf_fortress_15_poses.npy
│ ├── mocnerf_fortress_1_forward_passes.npy
│ ├── mocnerf_fortress_1_poses.npy
│ ├── mocnerf_fortress_3_forward_passes.npy
│ ├── mocnerf_fortress_3_poses.npy
│ ├── mocnerf_fortress_5_forward_passes.npy
│ ├── mocnerf_fortress_5_poses.npy
│ ├── mocnerf_horns_0_forward_passes.npy
│ ├── mocnerf_horns_0_poses.npy
│ ├── mocnerf_horns_16_forward_passes.npy
│ ├── mocnerf_horns_16_poses.npy
│ ├── mocnerf_horns_17_forward_passes.npy
│ ├── mocnerf_horns_17_poses.npy
│ ├── mocnerf_horns_2_forward_passes.npy
│ ├── mocnerf_horns_2_poses.npy
│ ├── mocnerf_horns_7_forward_passes.npy
│ ├── mocnerf_horns_7_poses.npy
│ ├── mocnerf_room_16_forward_passes.npy
│ ├── mocnerf_room_16_poses.npy
│ ├── mocnerf_room_3_forward_passes.npy
│ ├── mocnerf_room_3_poses.npy
│ ├── mocnerf_room_6_forward_passes.npy
│ ├── mocnerf_room_6_poses.npy
│ ├── mocnerf_room_7_forward_passes.npy
│ ├── mocnerf_room_7_poses.npy
│ ├── mocnerf_room_8_forward_passes.npy
│ ├── mocnerf_room_8_poses.npy
│ ├── mocnerf_started_fern_10_forward_passes.npy
│ ├── mocnerf_started_fern_10_poses.npy
│ ├── mocnerf_started_fern_12_forward_passes.npy
│ ├── mocnerf_started_fern_12_poses.npy
│ ├── mocnerf_started_fern_13_forward_passes.npy
│ ├── mocnerf_started_fern_13_poses.npy
│ ├── mocnerf_started_fern_18_forward_passes.npy
│ ├── mocnerf_started_fern_18_poses.npy
│ ├── mocnerf_started_fern_7_forward_passes.npy
│ ├── mocnerf_started_fern_7_poses.npy
│ ├── mocnerf_started_fortress_12_forward_passes.npy
│ ├── mocnerf_started_fortress_12_poses.npy
│ ├── mocnerf_started_fortress_15_forward_passes.npy
│ ├── mocnerf_started_fortress_15_poses.npy
│ ├── mocnerf_started_fortress_1_forward_passes.npy
│ ├── mocnerf_started_fortress_1_poses.npy
│ ├── mocnerf_started_fortress_3_forward_passes.npy
│ ├── mocnerf_started_fortress_3_poses.npy
│ ├── mocnerf_started_fortress_5_forward_passes.npy
│ ├── mocnerf_started_fortress_5_poses.npy
│ ├── mocnerf_started_horns_0_forward_passes.npy
│ ├── mocnerf_started_horns_0_poses.npy
│ ├── mocnerf_started_horns_16_forward_passes.npy
│ ├── mocnerf_started_horns_16_poses.npy
│ ├── mocnerf_started_horns_17_forward_passes.npy
│ ├── mocnerf_started_horns_17_poses.npy
│ ├── mocnerf_started_horns_2_forward_passes.npy
│ ├── mocnerf_started_horns_2_poses.npy
│ ├── mocnerf_started_horns_7_forward_passes.npy
│ ├── mocnerf_started_horns_7_poses.npy
│ ├── mocnerf_started_room_16_forward_passes.npy
│ ├── mocnerf_started_room_16_poses.npy
│ ├── mocnerf_started_room_3_forward_passes.npy
│ ├── mocnerf_started_room_3_poses.npy
│ ├── mocnerf_started_room_6_forward_passes.npy
│ ├── mocnerf_started_room_6_poses.npy
│ ├── mocnerf_started_room_7_forward_passes.npy
│ ├── mocnerf_started_room_7_poses.npy
│ ├── mocnerf_started_room_8_forward_passes.npy
│ └── mocnerf_started_room_8_poses.npy
├── package.xml
├── requirements.txt
├── rviz/
│ └── rviz.rviz
├── setup.py
├── src/
│ ├── full_filter.py
│ ├── full_nerf_helpers.py
│ ├── nav_node.py
│ ├── navigator_base.py
│ ├── particle_filter.py
│ ├── render_helpers.py
│ └── utils.py
└── tools/
├── eval_logs.py
└── ros_to_jpg.py
SYMBOL INDEX (71 symbols across 8 files)
FILE: src/full_filter.py
class NeRF (line 19) | class NeRF:
method __init__ (line 21) | def __init__(self, nerf_params):
method get_poi_interest_regions (line 90) | def get_poi_interest_regions(self, show_img=False, sampling_type = None):
method get_loss (line 104) | def get_loss(self, particles, batch, photometric_loss='rgb'):
method visualize_nerf_image (line 145) | def visualize_nerf_image(self, nerf_pose):
FILE: src/full_nerf_helpers.py
function load_nerf (line 12) | def load_nerf(nerf_params, device):
class Embedder (line 77) | class Embedder:
method __init__ (line 78) | def __init__(self, **kwargs):
method create_embedding_fn (line 82) | def create_embedding_fn(self):
method embed (line 106) | def embed(self, inputs):
function get_embedder (line 110) | def get_embedder(multires, i=0):
class NeRF (line 129) | class NeRF(nn.Module):
method __init__ (line 130) | def __init__(self, D=8, W=256, input_ch=3, input_ch_views=3, output_ch...
method forward (line 159) | def forward(self, x):
method load_weights_from_keras (line 184) | def load_weights_from_keras(self, weights):
function batchify (line 215) | def batchify(fn, chunk):
function run_network (line 225) | def run_network(inputs, viewdirs, fn, embed_fn, embeddirs_fn, netchunk=1...
FILE: src/nav_node.py
class Navigator (line 31) | class Navigator(NavigatorBase):
method __init__ (line 32) | def __init__(self, img_num=None, dataset_name=None):
method get_initial_distribution (line 72) | def get_initial_distribution(self):
method vio_callback (line 144) | def vio_callback(self, msg):
method rgb_callback (line 168) | def rgb_callback(self, msg):
method rgb_run (line 171) | def rgb_run(self, msg, get_rays_fn=None, render_full_image=False):
method check_if_position_error_good (line 277) | def check_if_position_error_good(self, return_error = False):
method check_if_rotation_error_good (line 293) | def check_if_rotation_error_good(self, return_error = False):
method run_predict (line 308) | def run_predict(self, delta_pose):
method set_initial_particles (line 314) | def set_initial_particles(self):
method set_noise (line 334) | def set_noise(self, scale):
method check_refine_gate (line 343) | def check_refine_gate(self):
method publish_pose_est (line 370) | def publish_pose_est(self, pose_est_gtsam, img_timestamp = None):
method visualize (line 395) | def visualize(self):
function average_arrays (line 434) | def average_arrays(axis_list):
FILE: src/navigator_base.py
class NavigatorBase (line 8) | class NavigatorBase:
method __init__ (line 9) | def __init__(self, img_num=0, dataset_name=None):
method check_params (line 128) | def check_params(self):
FILE: src/particle_filter.py
class ParticleFilter (line 6) | class ParticleFilter:
method __init__ (line 8) | def __init__(self, initial_particles):
method reduce_num_particles (line 14) | def reduce_num_particles(self, num_particles):
method predict_no_motion (line 22) | def predict_no_motion(self, p_x, p_y, p_z, r_x, r_y, r_z):
method predict_with_delta_pose (line 36) | def predict_with_delta_pose(self, delta_pose, p_x, p_y, p_z, r_x, r_y,...
method update (line 62) | def update(self):
method compute_simple_position_average (line 79) | def compute_simple_position_average(self):
method compute_weighted_position_average (line 84) | def compute_weighted_position_average(self):
method compute_simple_rotation_average (line 88) | def compute_simple_rotation_average(self):
FILE: src/render_helpers.py
function get_rays (line 16) | def get_rays(H, W, focal, c2w):
function get_rays_np (line 28) | def get_rays_np(H, W, focal, c2w):
function ndc_rays (line 39) | def ndc_rays(H, W, focal, near, rays_o, rays_d):
function sample_pdf (line 60) | def sample_pdf(bins, weights, N_samples, det=False, pytest=False):
function raw2outputs (line 106) | def raw2outputs(raw, z_vals, rays_d, raw_noise_std=0, white_bkgd=False, ...
function render_rays (line 152) | def render_rays(ray_batch,
function batchify_rays (line 264) | def batchify_rays(rays_flat, chunk=1024*32, **kwargs):
function render (line 279) | def render(H, W, focal, chunk=1024*32, rays=None, c2w=None, ndc=True,
function render_path (line 347) | def render_path(render_poses, hwf, chunk, render_kwargs, gt_imgs=None, s...
FILE: src/utils.py
function load_blender (line 35) | def load_blender(data_dir, model_name, obs_img_num, half_res, white_bkgd...
function get_pose (line 64) | def get_pose(phi, theta, psi, x, y, z, obs_img_pose, center_about_true_p...
function rgb2bgr (line 74) | def rgb2bgr(img_rgb):
function show_img (line 79) | def show_img(title, img_rgb): # img - rgb image
function find_POI (line 86) | def find_POI(img_rgb, num_points, DEBUG=False): # img - RGB image in ran...
function _minify (line 113) | def _minify(basedir, factors=[], resolutions=[]):
function _load_data (line 164) | def _load_data(basedir, factor=None, width=None, height=None, load_imgs=...
function normalize (line 223) | def normalize(x):
function viewmatrix (line 227) | def viewmatrix(z, up, pos):
function ptstocam (line 236) | def ptstocam(pts, c2w):
function poses_avg (line 241) | def poses_avg(poses):
function recenter_poses (line 252) | def recenter_poses(poses):
function spherify_poses (line 269) | def spherify_poses(poses, bds):
function load_llff_data (line 310) | def load_llff_data(data_dir, model_name, obs_img_num, *kwargs, factor=8,...
FILE: tools/eval_logs.py
function check_if_position_error_good (line 33) | def check_if_position_error_good(gt_pose, est_pose, get_mean_error=False):
function check_if_rotation_error_good (line 40) | def check_if_rotation_error_good(gt_pose, est_pose, get_mean_error=False):
function average_arrays (line 51) | def average_arrays(axis_list, million_scale = True):
function get_ratio (line 74) | def get_ratio(alg, get_mean_error=False, prefix=""):
Condensed preview — 540 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (128K chars).
[
{
"path": "CMakeLists.txt",
"chars": 389,
"preview": "cmake_minimum_required(VERSION 2.8.3)\nproject(locnerf)\n\nfind_package(catkin REQUIRED COMPONENTS\n geometry_msgs\n sensor"
},
{
"path": "LICENSE",
"chars": 1094,
"preview": "MIT License\n\nCopyright (c) 2022 Massachusetts Institute of Technology\n\nPermission is hereby granted, free of charge, to "
},
{
"path": "README.md",
"chars": 9689,
"preview": "# Loc-NeRF\n\nMonte Carlo Localization using Neural Radiance Fields. \n\n## Coordinate Frames\nTo be consistent throughout th"
},
{
"path": "cfg/jackal.yaml",
"chars": 4938,
"preview": "#### main filtering flags ####\nrun_predicts: True # Use odometry for predict step, otherwise use 0 mean gaussian noise.\n"
},
{
"path": "cfg/llff.yaml",
"chars": 4911,
"preview": "#### filtering flags ####\nrun_predicts: False # Use odometry for predict step, otherwise use 0 mean gaussian noise.\nuse_"
},
{
"path": "cfg/llff_global.yaml",
"chars": 4907,
"preview": "#### filtering flags ####\nrun_predicts: False # Use odometry for predict step, otherwise use 0 mean gaussian noise.\nuse_"
},
{
"path": "launch/navigate.launch",
"chars": 230,
"preview": "<launch>\n <arg name=\"parameter_file\" default=\"config.yaml\" />\n\n <rosparam command=\"load\" file=\"$(find locnerf)/cfg/$(a"
},
{
"path": "package.xml",
"chars": 453,
"preview": "<?xml version=\"1.0\"?>\n<package>\n <name>locnerf</name>\n <version>0.1.0</version>\n <description>Run particle filter wit"
},
{
"path": "requirements.txt",
"chars": 129,
"preview": "cv_bridge\ngtsam\nimageio\nmatplotlib\nnumpy\nrospy\nrostopic\nscipy\nsensor_msgs\nopencv_contrib_python\ntorch\ntorch_ema\ntorchvis"
},
{
"path": "rviz/rviz.rviz",
"chars": 6737,
"preview": "Panels:\n - Class: rviz/Displays\n Help Height: 78\n Name: Displays\n Property Tree Widget:\n Expanded:\n "
},
{
"path": "setup.py",
"chars": 196,
"preview": "from distutils.core import setup\nfrom catkin_pkg.python_setup import generate_distutils_setup\n\nargs = generate_distutils"
},
{
"path": "src/full_filter.py",
"chars": 6658,
"preview": "from matplotlib.markers import MarkerStyle\nimport torch\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nim"
},
{
"path": "src/full_nerf_helpers.py",
"chars": 9233,
"preview": "import torch\nimport os\ntorch.autograd.set_detect_anomaly(True)\nimport torch.nn as nn\nimport torch.nn.functional as F\nimp"
},
{
"path": "src/nav_node.py",
"chars": 24586,
"preview": "#!/usr/bin/env python\n\nfrom turtle import pos\n\nimport rospy\nimport numpy as np\nimport gtsam\nimport cv2\nimport torch\nimpo"
},
{
"path": "src/navigator_base.py",
"chars": 7217,
"preview": "import rospy\nimport numpy as np\nimport warnings\nfrom full_filter import NeRF\n\n # Base class to handle loading params fro"
},
{
"path": "src/particle_filter.py",
"chars": 5006,
"preview": "import numpy as np\nimport gtsam\n\nfrom multiprocessing import Lock\n\nclass ParticleFilter:\n\n def __init__(self, initial"
},
{
"path": "src/render_helpers.py",
"chars": 15540,
"preview": "import os\nimport torch\nfrom tqdm import tqdm\nimport time\nimport imageio\nimport numpy as np\nimport torch.nn.functional as"
},
{
"path": "src/utils.py",
"chars": 11417,
"preview": "import torch\nimport numpy as np\nimport imageio\nimport cv2\nimport json\nimport os\n\n# most of this script is adapted from i"
},
{
"path": "tools/eval_logs.py",
"chars": 9249,
"preview": "import matplotlib.pyplot as plt\nimport numpy as np\nimport glob\nimport gtsam\nimport sys\n\n######################## user pa"
},
{
"path": "tools/ros_to_jpg.py",
"chars": 769,
"preview": "import rosbag\nimport rospy\nimport numpy as np\nimport csv\nimport os\nimport cv2\nfrom cv_bridge import CvBridge\n\n######## u"
}
]
// ... and 520 more files (download for full content)
About this extraction
This page contains the full source code of the MIT-SPARK/Loc-NeRF GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 540 files (120.5 KB), approximately 42.4k tokens, and a symbol index with 71 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.