Repository: NeoGeographyToolkit/StereoPipeline
Branch: master
Commit: a3d1ace4c673
Files: 877
Total size: 19.0 MB
Directory structure:
gitextract_o1uagew5/
├── .github/
│ ├── ISSUE_TEMPLATE/
│ │ ├── bibliography-addition---.md
│ │ ├── bug-report---.md
│ │ └── feature-request---.md
│ ├── pull_request_template.md
│ └── workflows/
│ ├── build_helper.sh
│ ├── build_helper_linux.yml
│ ├── build_test.sh
│ ├── build_test_mac_arm64.yml
│ ├── build_test_mac_x64.yml
│ ├── save_linux_deps.sh
│ ├── save_mac_deps.sh
│ ├── ssh_linux_arm.yml
│ ├── ssh_mac_arm.yml
│ ├── ssh_mac_x64.yml
│ └── update_mac_tests.sh
├── .gitignore
├── .readthedocs.yml
├── AUTHORS.rst
├── CMakeLists.txt
├── CODE_OF_CONDUCT.rst
├── CONTRIBUTING.rst
├── ChangeLog
├── INSTALLGUIDE.rst
├── LICENSE
├── NEWS.rst
├── README.rst
├── RELEASEGUIDE
├── THIRDPARTYLICENSES.rst
├── cmake/
│ ├── FindCairo.cmake
│ ├── FindCairomm.cmake
│ ├── FindFreeType.cmake
│ ├── FindSigC++.cmake
│ ├── LibFindMacros.cmake
│ ├── Utilities.cmake
│ └── linux_cross_toolchain.cmake
├── conda/
│ ├── asp_2.7.0_linux_env.yaml
│ ├── asp_2.7.0_osx_env.yaml
│ ├── asp_3.0.0_linux_env.yaml
│ ├── asp_3.0.0_osx_env.yaml
│ ├── asp_3.1.0_linux_env.yaml
│ ├── asp_3.1.0_osx_env.yaml
│ ├── asp_3.2.0_linux_env.yaml
│ ├── asp_3.2.0_osx_env.yaml
│ ├── asp_3.3.0_linux_env.yaml
│ ├── asp_3.3.0_osx_env.yaml
│ ├── asp_3.5.0_linux_env.yaml
│ ├── asp_3.5.0_osx_x64_env.yaml
│ ├── asp_deps_2.7.0_linux_env.yaml
│ ├── asp_deps_2.7.0_osx_env.yaml
│ ├── asp_deps_3.0.0_linux_env.yaml
│ ├── asp_deps_3.0.0_osx_env.yaml
│ ├── asp_deps_3.1.0_linux_env.yaml
│ ├── asp_deps_3.1.0_osx_env.yaml
│ ├── asp_deps_3.2.0_linux_env.yaml
│ ├── asp_deps_3.2.0_osx_env.yaml
│ ├── asp_deps_3.3.0_linux_env.yaml
│ ├── asp_deps_3.3.0_osx_env.yaml
│ ├── asp_deps_3.4.0_alpha_linux_env.yaml
│ ├── asp_deps_3.4.0_alpha_osx_env.yaml
│ ├── environment.yml
│ └── update_versions.py
├── docs/
│ ├── Makefile
│ ├── acknowledgements.rst
│ ├── bathy_water_masking.rst
│ ├── bibliography.bib
│ ├── building_asp.rst
│ ├── bundle_adjustment.rst
│ ├── conf.py
│ ├── contributing.rst
│ ├── correlation.rst
│ ├── environment.yml
│ ├── error_propagation.rst
│ ├── examples/
│ │ ├── apollo15.rst
│ │ ├── aster.rst
│ │ ├── bathy.rst
│ │ ├── cassini.rst
│ │ ├── chandrayaan2.rst
│ │ ├── change3.rst
│ │ ├── csm.rst
│ │ ├── ctx.rst
│ │ ├── dawn.rst
│ │ ├── dg.rst
│ │ ├── hirise.rst
│ │ ├── historical.rst
│ │ ├── hrsc.rst
│ │ ├── isis_minirf.rst
│ │ ├── junocam.rst
│ │ ├── k10.rst
│ │ ├── kaguya.rst
│ │ ├── lronac.rst
│ │ ├── mer.rst
│ │ ├── moc.rst
│ │ ├── msl.rst
│ │ ├── orbital_rig.rst
│ │ ├── pbs_slurm.rst
│ │ ├── perusat1.rst
│ │ ├── pleiades.rst
│ │ ├── rig.rst
│ │ ├── rpc.rst
│ │ ├── sfm_iss.rst
│ │ ├── sfs_ctx.rst
│ │ ├── sfs_earth.rst
│ │ ├── skysat.rst
│ │ ├── spot5.rst
│ │ ├── spot67.rst
│ │ ├── stereo_pairs.rst
│ │ └── umbra_sar.rst
│ ├── examples.rst
│ ├── experimental.rst
│ ├── glossary.rst
│ ├── index.rst
│ ├── installation.rst
│ ├── introduction.rst
│ ├── news.rst
│ ├── next_steps.rst
│ ├── outputfiles.rst
│ ├── papersusingasp.bib
│ ├── papersusingasp.rst
│ ├── pinholemodels.rst
│ ├── requirements.txt
│ ├── sfm.rst
│ ├── sfs_usage.rst
│ ├── stereo_algorithms.rst
│ ├── stereodefault.rst
│ ├── thirdparty.rst
│ ├── tips_tricks.rst
│ ├── tools/
│ │ ├── add_spot_rpc.rst
│ │ ├── aster2asp.rst
│ │ ├── bathy_plane_calc.rst
│ │ ├── bathy_threshold_calc.rst
│ │ ├── bundle_adjust.rst
│ │ ├── cam2map4stereo.rst
│ │ ├── cam2rpc.rst
│ │ ├── cam_gen.rst
│ │ ├── cam_test.rst
│ │ ├── camera_calibrate.rst
│ │ ├── camera_footprint.rst
│ │ ├── camera_solve.rst
│ │ ├── cgal_tools.rst
│ │ ├── colormap.rst
│ │ ├── convert_pinhole_model.rst
│ │ ├── corr_eval.rst
│ │ ├── correlator.rst
│ │ ├── dem2gcp.rst
│ │ ├── dem_geoid.rst
│ │ ├── dem_mosaic.rst
│ │ ├── dg_mosaic.rst
│ │ ├── disp2ip.rst
│ │ ├── disparitydebug.rst
│ │ ├── gcp_gen.rst
│ │ ├── gdal.rst
│ │ ├── geodiff.rst
│ │ ├── hiedr2mosaic.rst
│ │ ├── hillshade.rst
│ │ ├── historical_helper.rst
│ │ ├── icebridge_kmz_to_csv.rst
│ │ ├── image2qtree.rst
│ │ ├── image_align.rst
│ │ ├── image_calc.rst
│ │ ├── image_mosaic.rst
│ │ ├── image_subset.rst
│ │ ├── ipfind.rst
│ │ ├── ipmatch.rst
│ │ ├── jitter_solve.rst
│ │ ├── lronac2mosaic.rst
│ │ ├── lvis2kml.rst
│ │ ├── mapproject.rst
│ │ ├── multi_stereo.rst
│ │ ├── n_align.rst
│ │ ├── orbit_plot.rst
│ │ ├── orbitviz.rst
│ │ ├── ortho2pinhole.rst
│ │ ├── otsu_threshold.rst
│ │ ├── pansharp.rst
│ │ ├── parallel_bundle_adjust.rst
│ │ ├── parallel_sfs.rst
│ │ ├── parallel_stereo.rst
│ │ ├── parse_match_file.rst
│ │ ├── pc_align.rst
│ │ ├── pc_filter.rst
│ │ ├── pc_merge.rst
│ │ ├── point2dem.rst
│ │ ├── point2las.rst
│ │ ├── point2mesh.rst
│ │ ├── refr_index.rst
│ │ ├── rig_calibrator.rst
│ │ ├── ros.rst
│ │ ├── sat_sim.rst
│ │ ├── sfm_merge.rst
│ │ ├── sfm_proc.rst
│ │ ├── sfm_submap.rst
│ │ ├── sfm_view.rst
│ │ ├── sfs.rst
│ │ ├── sfs_blend.rst
│ │ ├── stereo.rst
│ │ ├── stereo_dist.rst
│ │ ├── stereo_gui.rst
│ │ ├── stereo_sweep.rst
│ │ ├── stereo_tile.rst
│ │ ├── texrecon.rst
│ │ ├── theia_sfm.rst
│ │ ├── undistort_image.rst
│ │ ├── view_reconstruction.rst
│ │ ├── voxblox_mesh.rst
│ │ └── wv_correct.rst
│ ├── tools.rst
│ ├── tutorial.rst
│ └── zzreferences.rst
├── examples/
│ ├── CTX/
│ │ ├── Makefile
│ │ ├── control.net
│ │ └── stereo.nonmap
│ ├── HiRISE/
│ │ ├── Makefile
│ │ ├── downloader.sh
│ │ └── stereo.nonmap
│ ├── K10/
│ │ ├── Makefile
│ │ ├── black_left.tsai
│ │ ├── black_right.tsai
│ │ └── stereo.default
│ ├── MER/
│ │ ├── Makefile
│ │ ├── stereo.default
│ │ └── stereo.default.navcam
│ ├── MOC/
│ │ ├── E0201461.imq
│ │ ├── M0100115.imq
│ │ ├── Makefile
│ │ ├── control.net
│ │ └── stereo.nonmap
│ ├── TriangulationError.lut
│ └── surf_match.patch
├── plugins/
│ └── stereo/
│ └── plugin_list.txt
├── src/
│ ├── CMakeLists.txt
│ ├── asp/
│ │ ├── CMakeLists.txt
│ │ ├── Camera/
│ │ │ ├── ASTER_XML.cc
│ │ │ ├── ASTER_XML.h
│ │ │ ├── BaParams.cc
│ │ │ ├── BaParams.h
│ │ │ ├── BaseCostFuns.cc
│ │ │ ├── BaseCostFuns.h
│ │ │ ├── BundleAdjustCamera.cc
│ │ │ ├── BundleAdjustCamera.h
│ │ │ ├── BundleAdjustCamera2.cc
│ │ │ ├── BundleAdjustCostFuns.cc
│ │ │ ├── BundleAdjustCostFuns.h
│ │ │ ├── BundleAdjustEigen.cc
│ │ │ ├── BundleAdjustEigen.h
│ │ │ ├── BundleAdjustIO.cc
│ │ │ ├── BundleAdjustIsis.cc
│ │ │ ├── BundleAdjustIsis.h
│ │ │ ├── BundleAdjustOptions.cc
│ │ │ ├── BundleAdjustOptions.h
│ │ │ ├── BundleAdjustOutliers.cc
│ │ │ ├── BundleAdjustOutliers.h
│ │ │ ├── BundleAdjustResiduals.cc
│ │ │ ├── BundleAdjustResiduals.h
│ │ │ ├── CMakeLists.txt
│ │ │ ├── CameraErrorPropagation.cc
│ │ │ ├── CameraErrorPropagation.h
│ │ │ ├── CameraResectioning.cc
│ │ │ ├── CameraResectioning.h
│ │ │ ├── Covariance.cc
│ │ │ ├── Covariance.h
│ │ │ ├── CsmModel.cc
│ │ │ ├── CsmModel.h
│ │ │ ├── CsmModelFit.cc
│ │ │ ├── CsmModelFit.h
│ │ │ ├── CsmUtils.cc
│ │ │ ├── CsmUtils.h
│ │ │ ├── JitterSolveCostFuns.cc
│ │ │ ├── JitterSolveCostFuns.h
│ │ │ ├── JitterSolveRigCostFuns.cc
│ │ │ ├── JitterSolveRigCostFuns.h
│ │ │ ├── JitterSolveRigUtils.cc
│ │ │ ├── JitterSolveRigUtils.h
│ │ │ ├── JitterSolveUtils.cc
│ │ │ ├── JitterSolveUtils.h
│ │ │ ├── LinescanASTERModel.cc
│ │ │ ├── LinescanASTERModel.h
│ │ │ ├── LinescanDGModel.cc
│ │ │ ├── LinescanDGModel.h
│ │ │ ├── LinescanPeruSatModel.cc
│ │ │ ├── LinescanPeruSatModel.h
│ │ │ ├── LinescanPleiadesModel.cc
│ │ │ ├── LinescanPleiadesModel.h
│ │ │ ├── LinescanSpotModel.cc
│ │ │ ├── LinescanSpotModel.h
│ │ │ ├── LinescanUtils.cc
│ │ │ ├── LinescanUtils.h
│ │ │ ├── MapprojectImage.cc
│ │ │ ├── MapprojectImage.h
│ │ │ ├── PRISM_XML.cc
│ │ │ ├── PRISM_XML.h
│ │ │ ├── PeruSatXML.cc
│ │ │ ├── PeruSatXML.h
│ │ │ ├── PleiadesXML.cc
│ │ │ ├── PleiadesXML.h
│ │ │ ├── RPCModel.cc
│ │ │ ├── RPCModel.h
│ │ │ ├── RPCModelGen.cc
│ │ │ ├── RPCModelGen.h
│ │ │ ├── RPCStereoModel.cc
│ │ │ ├── RPCStereoModel.h
│ │ │ ├── RPC_XML.cc
│ │ │ ├── RPC_XML.h
│ │ │ ├── RpcUtils.cc
│ │ │ ├── RpcUtils.h
│ │ │ ├── SPOT_XML.cc
│ │ │ ├── SPOT_XML.h
│ │ │ ├── SatSim.cc
│ │ │ ├── SatSim.h
│ │ │ ├── SyntheticLinescan.cc
│ │ │ ├── SyntheticLinescan.h
│ │ │ ├── TimeProcessing.cc
│ │ │ ├── TimeProcessing.h
│ │ │ ├── XMLBase.cc
│ │ │ ├── XMLBase.h
│ │ │ └── tests/
│ │ │ ├── TestCsmCameraModel.cxx
│ │ │ ├── TestDGCameraModel.cxx
│ │ │ ├── TestRPCStereoModel.cxx
│ │ │ ├── TestSpotCameraModel.cxx
│ │ │ ├── ctx.json
│ │ │ ├── dg_example1.xml
│ │ │ ├── dg_example2.xml
│ │ │ ├── dg_example3.xml
│ │ │ ├── dg_example4.xml
│ │ │ ├── sample.RPB
│ │ │ ├── spot_example1.xml
│ │ │ ├── spot_style.xsl
│ │ │ ├── wv_mvp_1.xml
│ │ │ ├── wv_mvp_2.xml
│ │ │ ├── wv_test1.xml
│ │ │ └── wv_test2.xml
│ │ ├── Core/
│ │ │ ├── AffineEpipolar.cc
│ │ │ ├── AffineEpipolar.h
│ │ │ ├── AlignmentUtils.cc
│ │ │ ├── AlignmentUtils.h
│ │ │ ├── AspLog.cc
│ │ │ ├── AspLog.h
│ │ │ ├── AspProgramOptions.cc
│ │ │ ├── AspProgramOptions.h
│ │ │ ├── AspStringUtils.cc
│ │ │ ├── AspStringUtils.h
│ │ │ ├── AsterProc.cc
│ │ │ ├── AsterProc.h
│ │ │ ├── BaBaseOptions.h
│ │ │ ├── BaseCameraUtils.cc
│ │ │ ├── BaseCameraUtils.h
│ │ │ ├── BathyPlaneCalc.cc
│ │ │ ├── BathyPlaneCalc.h
│ │ │ ├── Bathymetry.cc
│ │ │ ├── Bathymetry.h
│ │ │ ├── BitChecker.cc
│ │ │ ├── BitChecker.h
│ │ │ ├── BundleAdjustUtils.cc
│ │ │ ├── BundleAdjustUtils.h
│ │ │ ├── CMakeLists.txt
│ │ │ ├── CamPoseUtils.cc
│ │ │ ├── CamPoseUtils.h
│ │ │ ├── CameraTransforms.cc
│ │ │ ├── CameraTransforms.h
│ │ │ ├── CartographyUtils.cc
│ │ │ ├── CartographyUtils.h
│ │ │ ├── DataLoader.cc
│ │ │ ├── DataLoader.h
│ │ │ ├── DemDisparity.cc
│ │ │ ├── DemDisparity.h
│ │ │ ├── DemMosaic.cc
│ │ │ ├── DemMosaic.h
│ │ │ ├── DemMosaicOptions.h
│ │ │ ├── DemMosaicParse.cc
│ │ │ ├── DemMosaicParse.h
│ │ │ ├── DemUtils.cc
│ │ │ ├── DemUtils.h
│ │ │ ├── DisparityProcessing.cc
│ │ │ ├── DisparityProcessing.h
│ │ │ ├── EigenTransformUtils.cc
│ │ │ ├── EigenTransformUtils.h
│ │ │ ├── EigenUtils.cc
│ │ │ ├── EigenUtils.h
│ │ │ ├── EnvUtils.cc
│ │ │ ├── EnvUtils.h
│ │ │ ├── FileUtils.cc
│ │ │ ├── FileUtils.h
│ │ │ ├── GCP.cc
│ │ │ ├── GCP.h
│ │ │ ├── GdalUtils.cc
│ │ │ ├── GdalUtils.h
│ │ │ ├── ImageNormalization.cc
│ │ │ ├── ImageNormalization.h
│ │ │ ├── ImageUtils.cc
│ │ │ ├── ImageUtils.h
│ │ │ ├── InterestPointMatching.cc
│ │ │ ├── InterestPointMatching.h
│ │ │ ├── InterestPointMatching2.cc
│ │ │ ├── IpMatchingAlgs.cc
│ │ │ ├── IpMatchingAlgs.h
│ │ │ ├── LocalAlignment.cc
│ │ │ ├── LocalAlignment.h
│ │ │ ├── Macros.h
│ │ │ ├── MatchList.cc
│ │ │ ├── MatchList.h
│ │ │ ├── Nvm.cc
│ │ │ ├── Nvm.h
│ │ │ ├── OpenCVUtils.cc
│ │ │ ├── OpenCVUtils.h
│ │ │ ├── OrthoRasterizer.cc
│ │ │ ├── OrthoRasterizer.h
│ │ │ ├── OutlierProcessing.cc
│ │ │ ├── OutlierProcessing.h
│ │ │ ├── PdalUtils.cc
│ │ │ ├── PdalUtils.h
│ │ │ ├── PhotometricOutlier.cc
│ │ │ ├── PhotometricOutlier.h
│ │ │ ├── Point2Grid.cc
│ │ │ ├── Point2Grid.h
│ │ │ ├── PointCloudAlignment.cc
│ │ │ ├── PointCloudAlignment.h
│ │ │ ├── PointCloudProcessing.cc
│ │ │ ├── PointCloudProcessing.h
│ │ │ ├── PointCloudRead.cc
│ │ │ ├── PointCloudRead.h
│ │ │ ├── PointToDem.cc
│ │ │ ├── PointToDem.h
│ │ │ ├── PointToDem2.cc
│ │ │ ├── PointUtils.cc
│ │ │ ├── PointUtils.h
│ │ │ ├── ProjectiveCamApprox.cc
│ │ │ ├── ProjectiveCamApprox.h
│ │ │ ├── ReportUtils.cc
│ │ │ ├── ReportUtils.h
│ │ │ ├── StereoSettings.cc
│ │ │ ├── StereoSettings.h
│ │ │ ├── StereoSettingsDesc.h
│ │ │ ├── StereoSettingsParse.cc
│ │ │ ├── StereoSettingsParse.h
│ │ │ ├── StereoTiling.cc
│ │ │ ├── StereoTiling.h
│ │ │ ├── ThreadedEdgeMask.h
│ │ │ └── tests/
│ │ │ ├── TestAspProgramOptions.cxx
│ │ │ ├── TestIntegralAutoGainDetector.cxx
│ │ │ ├── TestInterestPointMatching.cxx
│ │ │ ├── TestPointUtils.cxx
│ │ │ ├── TestThreadedEdgeMask.cxx
│ │ │ ├── ThreadTest1.tif
│ │ │ ├── ThreadTest2.tif
│ │ │ ├── ThreadTest3.tif
│ │ │ └── sample_ascii.pcd
│ │ ├── GUI/
│ │ │ ├── AppData.cc
│ │ │ ├── AppData.h
│ │ │ ├── CMakeLists.txt
│ │ │ ├── ChooseFilesDlg.cc
│ │ │ ├── ChooseFilesDlg.h
│ │ │ ├── DiskImagePyramidMultiChannel.cc
│ │ │ ├── DiskImagePyramidMultiChannel.h
│ │ │ ├── EventWidget.cc
│ │ │ ├── GuiArgs.cc
│ │ │ ├── GuiArgs.h
│ │ │ ├── GuiBase.cc
│ │ │ ├── GuiBase.h
│ │ │ ├── GuiConstants.h
│ │ │ ├── GuiGeom.cc
│ │ │ ├── GuiGeom.h
│ │ │ ├── GuiUtilities.cc
│ │ │ ├── GuiUtilities.h
│ │ │ ├── ImageData.cc
│ │ │ ├── ImageData.h
│ │ │ ├── MainWidget.cc
│ │ │ ├── MainWidget.h
│ │ │ ├── MainWindow.cc
│ │ │ ├── MainWindow.h
│ │ │ ├── MatchPointMgr.cc
│ │ │ ├── MatchPointMgr.h
│ │ │ ├── PolyWidget.cc
│ │ │ ├── ProfileWidget.cc
│ │ │ ├── WidgetBase.cc
│ │ │ ├── WidgetBase.h
│ │ │ ├── WidgetMenuMgr.cc
│ │ │ ├── WidgetMenuMgr.h
│ │ │ ├── WindowMenuMgr.cc
│ │ │ └── WindowMenuMgr.h
│ │ ├── GenerateConfig.cmake
│ │ ├── Gotcha/
│ │ │ ├── ALSC.cc
│ │ │ ├── ALSC.h
│ │ │ ├── CALSCParam.h
│ │ │ ├── CASP-GO_params.xml
│ │ │ ├── CBatchProc.cc
│ │ │ ├── CBatchProc.h
│ │ │ ├── CDensify.cc
│ │ │ ├── CDensify.h
│ │ │ ├── CDensifyParam.h
│ │ │ ├── CGOTCHAParam.h
│ │ │ ├── CMakeLists.txt
│ │ │ ├── CProcBlock.cc
│ │ │ ├── CProcBlock.h
│ │ │ ├── CTiePt.h
│ │ │ └── README.txt
│ │ ├── IceBridge/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── README.txt
│ │ │ ├── archive_functions.py
│ │ │ ├── blend_dems.py
│ │ │ ├── camera_lookup.txt
│ │ │ ├── camera_models_from_nav.py
│ │ │ ├── correct_icebridge_l3_dem.cc
│ │ │ ├── extract_icebridge_ATM_points.py
│ │ │ ├── fetch_icebridge_data.py
│ │ │ ├── fetcher_script.py
│ │ │ ├── full_processing_script.py
│ │ │ ├── gen_ortho.py
│ │ │ ├── generate_fake_camera_models.py
│ │ │ ├── generate_flight_summary.py
│ │ │ ├── get_date_list.py
│ │ │ ├── icebridge_common.py
│ │ │ ├── icebridge_kmz_to_csv.py
│ │ │ ├── input_conversions.py
│ │ │ ├── label_images.py
│ │ │ ├── lvis2kml.py
│ │ │ ├── merge_orbitviz.py
│ │ │ ├── multi_flight_label_runner.py
│ │ │ ├── multi_process_command_runner.py
│ │ │ ├── nav2cam.cc
│ │ │ ├── orbitviz_pinhole.cc
│ │ │ ├── ortho2pinhole.cc
│ │ │ ├── pbs_functions.py
│ │ │ ├── pleiades_job_runner.sh
│ │ │ ├── pleiades_manager.py
│ │ │ ├── process_calibration_file.py
│ │ │ ├── process_icebridge_batch.py
│ │ │ ├── process_icebridge_run.py
│ │ │ ├── push_to_nsidc.py
│ │ │ ├── qi2txt.cc
│ │ │ ├── regenerate_summary_images.py
│ │ │ ├── run_helper.py
│ │ │ ├── run_multiple_flights.py
│ │ │ ├── sbet2txt.pl
│ │ │ └── special_cases.txt
│ │ ├── IsisIO/
│ │ │ ├── BaseEquation.h
│ │ │ ├── CMakeLists.txt
│ │ │ ├── DiskImageResourceIsis.cc
│ │ │ ├── DiskImageResourceIsis.h
│ │ │ ├── Equation.cc
│ │ │ ├── Equation.h
│ │ │ ├── IsisCameraModel.h
│ │ │ ├── IsisInterface.cc
│ │ │ ├── IsisInterface.h
│ │ │ ├── IsisInterfaceFrame.cc
│ │ │ ├── IsisInterfaceFrame.h
│ │ │ ├── IsisInterfaceLineScan.cc
│ │ │ ├── IsisInterfaceLineScan.h
│ │ │ ├── IsisInterfaceMapFrame.cc
│ │ │ ├── IsisInterfaceMapFrame.h
│ │ │ ├── IsisInterfaceMapLineScan.cc
│ │ │ ├── IsisInterfaceMapLineScan.h
│ │ │ ├── IsisInterfaceSAR.cc
│ │ │ ├── IsisInterfaceSAR.h
│ │ │ ├── IsisSpecialPixels.cc
│ │ │ ├── IsisSpecialPixels.h
│ │ │ ├── PolyEquation.cc
│ │ │ ├── PolyEquation.h
│ │ │ ├── RPNEquation.cc
│ │ │ ├── RPNEquation.h
│ │ │ └── tests/
│ │ │ ├── 5165r.cub
│ │ │ ├── 5165r.map.cub
│ │ │ ├── E0201461.tiny.cub
│ │ │ ├── E1701676.reduce.cub
│ │ │ ├── TestEphemerisEquations.cxx
│ │ │ └── TestIsisCameraModel.cxx
│ │ ├── OpenMVG/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── README.md
│ │ │ ├── flat_pair_map.hpp
│ │ │ ├── indMatch.hpp
│ │ │ ├── numeric.cpp
│ │ │ ├── numeric.h
│ │ │ ├── projection.cpp
│ │ │ ├── projection.hpp
│ │ │ ├── tracks.hpp
│ │ │ ├── triangulation.cpp
│ │ │ ├── triangulation.hpp
│ │ │ ├── triangulation_nview.cpp
│ │ │ ├── triangulation_nview.hpp
│ │ │ ├── types.hpp
│ │ │ └── union_find.hpp
│ │ ├── PcAlign/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── MaskedImageAlgs.cc
│ │ │ ├── MaskedImageAlgs.h
│ │ │ ├── NuthAlignment.cc
│ │ │ ├── NuthAlignment.h
│ │ │ ├── NuthAlignmentParse.cc
│ │ │ ├── NuthAlignmentParse.h
│ │ │ ├── NuthFit.cc
│ │ │ ├── NuthFit.h
│ │ │ ├── SlopeAspect.cc
│ │ │ ├── SlopeAspect.h
│ │ │ ├── pc_align_ceres.cc
│ │ │ ├── pc_align_ceres.h
│ │ │ ├── pc_align_fgr.cc
│ │ │ ├── pc_align_fgr.h
│ │ │ ├── pc_align_utils.cc
│ │ │ └── pc_align_utils.h
│ │ ├── PclIO/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── PclIO.cc
│ │ │ └── PclIO.h
│ │ ├── Python/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── asp_alg_utils.py
│ │ │ ├── asp_cmd_utils.py
│ │ │ ├── asp_dist_utils.py
│ │ │ ├── asp_file_utils.py
│ │ │ ├── asp_geo_utils.py
│ │ │ ├── asp_image_utils.py
│ │ │ ├── asp_rig_utils.py
│ │ │ ├── asp_stereo_utils.py
│ │ │ ├── asp_string_utils.py
│ │ │ ├── asp_system_utils.py
│ │ │ └── scale_linescan.py
│ │ ├── Rig/
│ │ │ ├── BasicAlgs.cc
│ │ │ ├── BasicAlgs.h
│ │ │ ├── CMakeLists.txt
│ │ │ ├── CameraImage.h
│ │ │ ├── Detector.cc
│ │ │ ├── Detector.h
│ │ │ ├── ImageLookup.cc
│ │ │ ├── ImageLookup.h
│ │ │ ├── InterestPoint.cc
│ │ │ ├── InterestPoint.h
│ │ │ ├── InterpolationUtils.cc
│ │ │ ├── InterpolationUtils.h
│ │ │ ├── MergeMaps.cc
│ │ │ ├── MergeMaps.h
│ │ │ ├── NvmUtils.cc
│ │ │ ├── NvmUtils.h
│ │ │ ├── Ransac.cc
│ │ │ ├── Ransac.h
│ │ │ ├── RigCameraParams.cc
│ │ │ ├── RigCameraParams.h
│ │ │ ├── RigCameraUtils.cc
│ │ │ ├── RigCameraUtils.h
│ │ │ ├── RigConfig.cc
│ │ │ ├── RigConfig.h
│ │ │ ├── RigCostFunction.cc
│ │ │ ├── RigCostFunction.h
│ │ │ ├── RigData.cc
│ │ │ ├── RigData.h
│ │ │ ├── RigDem.cc
│ │ │ ├── RigDem.h
│ │ │ ├── RigImageIO.cc
│ │ │ ├── RigImageIO.h
│ │ │ ├── RigIo.cc
│ │ │ ├── RigIo.h
│ │ │ ├── RigMath.cc
│ │ │ ├── RigMath.h
│ │ │ ├── RigOptimizer.cc
│ │ │ ├── RigOptimizer.h
│ │ │ ├── RigOptions.h
│ │ │ ├── RigOutlier.cc
│ │ │ ├── RigOutlier.h
│ │ │ ├── RigParseOptions.cc
│ │ │ ├── RigParseOptions.h
│ │ │ ├── RigParseUtils.cc
│ │ │ ├── RigParseUtils.h
│ │ │ ├── RigRpcDistortion.cc
│ │ │ ├── RigRpcDistortion.h
│ │ │ ├── RigThread.cc
│ │ │ ├── RigThread.h
│ │ │ ├── RigTypeDefs.h
│ │ │ ├── RigUtils.cc
│ │ │ ├── RigUtils.h
│ │ │ ├── SystemUtils.cc
│ │ │ ├── SystemUtils.h
│ │ │ ├── TextureProcessing.cc
│ │ │ ├── TextureProcessing.h
│ │ │ ├── Tracks.cc
│ │ │ ├── Tracks.h
│ │ │ ├── TransformUtils.cc
│ │ │ ├── TransformUtils.h
│ │ │ ├── Triangulation.cc
│ │ │ ├── Triangulation.h
│ │ │ ├── happly.h
│ │ │ └── theia_flags.txt
│ │ ├── Sessions/
│ │ │ ├── BundleAdjustParse.cc
│ │ │ ├── BundleAdjustParse.h
│ │ │ ├── BundleAdjustSession.cc
│ │ │ ├── BundleAdjustSession.h
│ │ │ ├── CMakeLists.txt
│ │ │ ├── CameraModelLoader.cc
│ │ │ ├── CameraModelLoader.h
│ │ │ ├── CameraUtils.cc
│ │ │ ├── CameraUtils.h
│ │ │ ├── StereoSession.cc
│ │ │ ├── StereoSession.h
│ │ │ ├── StereoSessionASTER.cc
│ │ │ ├── StereoSessionASTER.h
│ │ │ ├── StereoSessionBathy.cc
│ │ │ ├── StereoSessionFactory.cc
│ │ │ ├── StereoSessionFactory.h
│ │ │ ├── StereoSessionGdal.cc
│ │ │ ├── StereoSessionGdal.h
│ │ │ ├── StereoSessionIo.cc
│ │ │ ├── StereoSessionIp.cc
│ │ │ ├── StereoSessionIsis.cc
│ │ │ ├── StereoSessionIsis.h
│ │ │ ├── StereoSessionMapProj.h
│ │ │ ├── StereoSessionNadirPinhole.cc
│ │ │ ├── StereoSessionNadirPinhole.h
│ │ │ ├── StereoSessionPinhole.cc
│ │ │ ├── StereoSessionPinhole.h
│ │ │ ├── StereoSessionRPC.h
│ │ │ └── tests/
│ │ │ ├── TestInstantiation.cxx
│ │ │ ├── TestStereoSessionASTER.cxx
│ │ │ ├── TestStereoSessionDG.cxx
│ │ │ ├── TestStereoSessionDGMapRPC.cxx
│ │ │ ├── TestStereoSessionRPC.cxx
│ │ │ ├── TestStereoSessionSpot.cxx
│ │ │ ├── dg_example1.xml
│ │ │ ├── dg_example2.xml
│ │ │ ├── dg_example3.xml
│ │ │ └── dg_example4.xml
│ │ ├── SfS/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── SfsArgs.cc
│ │ │ ├── SfsArgs.h
│ │ │ ├── SfsCamera.cc
│ │ │ ├── SfsCamera.h
│ │ │ ├── SfsCostFun.cc
│ │ │ ├── SfsCostFun.h
│ │ │ ├── SfsErrorEstim.cc
│ │ │ ├── SfsErrorEstim.h
│ │ │ ├── SfsImageProc.cc
│ │ │ ├── SfsImageProc.h
│ │ │ ├── SfsModel.cc
│ │ │ ├── SfsModel.h
│ │ │ ├── SfsOptions.cc
│ │ │ ├── SfsOptions.h
│ │ │ ├── SfsUtils.cc
│ │ │ └── SfsUtils.h
│ │ ├── SfmView/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── GlCommon.h
│ │ │ ├── GlContext.cc
│ │ │ ├── GlContext.h
│ │ │ ├── GlWidget.cc
│ │ │ ├── GlWidget.h
│ │ │ ├── MeshRenderer.cc
│ │ │ ├── MeshRenderer.h
│ │ │ ├── SceneManager.cc
│ │ │ ├── SceneManager.h
│ │ │ ├── SceneOverview.cc
│ │ │ ├── SceneOverview.h
│ │ │ ├── SceneRenderer.cc
│ │ │ ├── SceneRenderer.h
│ │ │ ├── SfmMainWindow.cc
│ │ │ ├── SfmMainWindow.h
│ │ │ ├── SfmMath.h
│ │ │ ├── SfmUtils.cc
│ │ │ └── SfmUtils.h
│ │ ├── Tools/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── add_spot_rpc.cc
│ │ │ ├── aster2asp.cc
│ │ │ ├── bathy_plane_calc.cc
│ │ │ ├── bathy_threshold_calc.py
│ │ │ ├── bias_dg_cam.py
│ │ │ ├── bundle_adjust.cc
│ │ │ ├── cam2map4stereo.py
│ │ │ ├── cam2rpc.cc
│ │ │ ├── cam_gen.cc
│ │ │ ├── cam_test.cc
│ │ │ ├── camera_calibrate
│ │ │ ├── camera_footprint.cc
│ │ │ ├── camera_solve
│ │ │ ├── corr_eval.cc
│ │ │ ├── coverage_fraction.cc
│ │ │ ├── dem2gcp.cc
│ │ │ ├── dem_geoid.cc
│ │ │ ├── dem_mosaic.cc
│ │ │ ├── dg_mosaic
│ │ │ ├── disp2ip.cc
│ │ │ ├── disparitydebug.cc
│ │ │ ├── extract_bag
│ │ │ ├── fit_rpc.cc
│ │ │ ├── gcp_gen.cc
│ │ │ ├── geodiff.cc
│ │ │ ├── hiedr2mosaic.py
│ │ │ ├── historical_helper.py
│ │ │ ├── image_align.cc
│ │ │ ├── image_calc.cc
│ │ │ ├── image_mosaic.cc
│ │ │ ├── image_subset.cc
│ │ │ ├── jitter_solve.cc
│ │ │ ├── list_timestamps
│ │ │ ├── lronac2mosaic.py
│ │ │ ├── lronacjitreg.cc
│ │ │ ├── mapproject
│ │ │ ├── mapproject_single.cc
│ │ │ ├── mer2camera.cc
│ │ │ ├── multi_stereo
│ │ │ ├── n_align.cc
│ │ │ ├── nonlin_trans_corr.cc
│ │ │ ├── opencv_calibrate.cc
│ │ │ ├── opencv_imagelist_creator.cc
│ │ │ ├── orbit_plot.py
│ │ │ ├── orbitviz.cc
│ │ │ ├── otsu_threshold.cc
│ │ │ ├── pansharp.cc
│ │ │ ├── parallel_bundle_adjust
│ │ │ ├── parallel_sfs
│ │ │ ├── parallel_stereo
│ │ │ ├── parse_match_file.py
│ │ │ ├── pc_align.cc
│ │ │ ├── pc_filter.cc
│ │ │ ├── pc_merge.cc
│ │ │ ├── point2dem.cc
│ │ │ ├── point2las.cc
│ │ │ ├── point2mesh.cc
│ │ │ ├── prism2asp.cc
│ │ │ ├── refr_index
│ │ │ ├── rig_bracket
│ │ │ ├── rig_calibrator.cc
│ │ │ ├── rpc_gen.cc
│ │ │ ├── runWithLog.py
│ │ │ ├── sat_sim.cc
│ │ │ ├── scale_bathy_mask.py
│ │ │ ├── sfm_merge.cc
│ │ │ ├── sfm_proc
│ │ │ ├── sfm_submap.cc
│ │ │ ├── sfm_view.cc
│ │ │ ├── sfs.cc
│ │ │ ├── sfs_blend.cc
│ │ │ ├── skysat_helper.py
│ │ │ ├── sparse_disp
│ │ │ ├── stereo
│ │ │ ├── stereo.cc
│ │ │ ├── stereo.h
│ │ │ ├── stereo_blend.cc
│ │ │ ├── stereo_corr.cc
│ │ │ ├── stereo_dist
│ │ │ ├── stereo_fltr.cc
│ │ │ ├── stereo_gui.cc
│ │ │ ├── stereo_parse.cc
│ │ │ ├── stereo_pprc.cc
│ │ │ ├── stereo_rfne.cc
│ │ │ ├── stereo_sweep
│ │ │ ├── stereo_tile
│ │ │ ├── stereo_tri.cc
│ │ │ ├── texrecon
│ │ │ ├── theia_sfm
│ │ │ ├── tif_mosaic.cc
│ │ │ ├── time_trials
│ │ │ ├── undistort_image_texrecon.cc
│ │ │ ├── wv_correct.cc
│ │ │ └── xyzi2csv.cc
│ │ ├── WVCorrect/
│ │ │ ├── CMakeLists.txt
│ │ │ ├── README_MULTISPECTRAL
│ │ │ ├── README_PAN
│ │ │ ├── WV02_BAND3_CCD_CORR.tif
│ │ │ ├── ccd_process.py
│ │ │ ├── disp_avg.cc
│ │ │ ├── find_ccds.m
│ │ │ ├── find_ccds_aux.m
│ │ │ ├── find_moving_avg.m
│ │ │ ├── fix_ccd.m
│ │ │ ├── form_corrections_image.py
│ │ │ ├── gen_scandir.pl
│ │ │ ├── ms_ccd_solve.sh
│ │ │ ├── ms_ccd_verify.sh
│ │ │ ├── ms_correction_lookup.txt
│ │ │ ├── plot_ccds.m
│ │ │ ├── run_lr.sh
│ │ │ └── run_lr_wrap.sh
│ │ ├── asp_config.h.in
│ │ └── asp_date_config.h.in
│ └── test/
│ ├── Helpers.h
│ └── test_main.cc
├── stereo.default.example
└── thirdparty/
├── autotroll.mak
├── gtest/
│ ├── CHANGES
│ ├── CONTRIBUTORS
│ ├── COPYING
│ ├── README
│ ├── include/
│ │ └── gtest/
│ │ └── gtest_ASP.h
│ └── src/
│ ├── gtest-all.cc
│ └── gtest_main.cc
├── m4/
│ └── autotroll.m4
└── protobuf.mak
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/ISSUE_TEMPLATE/bibliography-addition---.md
================================================
---
name: "Bibliography Addition \U0001F5C3"
about: Add a paper, abstract, or other reference that used ASP!
title: ''
labels: ''
assignees: ''
---
**DOI**
If the work has a DOI, that's really all we need. Please enter it.
**Citation**
Otherwise, please provide as complete a citation as possible.
If you have a citation in BibTeX format, please paste that in (but it isn't required).
================================================
FILE: .github/ISSUE_TEMPLATE/bug-report---.md
================================================
---
name: "Bug Report \U0001F41B"
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error
**Expected behavior**
A clear and concise description of what you expected to happen.
**Error Logs, Terminal Captures, Screenshots**
If applicable, please give us as much information as you can to help explain your problem.
**Your Environment (please complete the following information):**
- OS: [e.g. macOS, Linux, and their version number]
- ASP Version: [e.g. 2.6.2, can find via `stereo --version`]
- Any other environment information that might be helpful?
**Additional context**
Add any other context about the problem here.
================================================
FILE: .github/ISSUE_TEMPLATE/feature-request---.md
================================================
---
name: "Feature Request \U0001F680"
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
================================================
FILE: .github/pull_request_template.md
================================================
## Description
## Related Issue
## Motivation and Context
## How Has This Been Tested?
## Types of changes
- Bug fix (non-breaking change which fixes an issue)
- New feature (non-breaking change which adds functionality)
- Breaking change (fix or feature that would cause existing functionality to change)
## Checklist:
- My change requires a change to the documentation.
- I have updated the documentation accordingly.
- I have added tests to cover my changes.
- All new and existing tests passed.
## Licensing:
This project is released under the [LICENSE](https://github.com/NeoGeographyToolkit/StereoPipeline/blob/master/LICENSE).
- I claim copyrights on my contributions in this pull request, and I provide those contributions via this pull request under the same license terms that this project uses.
- I dedicate any and all copyright interest in my contributions in this pull request to the public domain. I make this dedication for the benefit of the public at large and to the detriment of my heirs and successors. I intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this contribution under copyright law.
================================================
FILE: .github/workflows/build_helper.sh
================================================
#!/bin/bash
# This is a debugging script that builds ASP and its dependencies. It also shows
# how the result of building can be uploaded as an artifact, and later moved to
# a permanent location. Some parts are more focused towards the Mac build in the
# cloud, while others apply to the local Linux build as well.
# This script is not meant to be run directly. Each block of code must be
# inspected, edited, and run separately. This is a scratchpad. The production
# logic is in build_test.sh, save_mac_deps.sh, and the
# https://github.com/NeoGeographyToolkit/stereopipeline-feedstock repo.
# This helps do a dress rehearsal for the build process, before using
# conda-build which is very slow and error-prone.
# After the dependencies are updated with this script, they can be saved for the
# future with the script save_mac_deps.sh. See that script for more info.
# Move from the source dir to the home dir ('cd' with no argument goes to $HOME)
cd
# Set up the compiler. Using a known compiler that is in the environment ensures
# there are no surprises when later conda-build is employed with the same
# compiler.
# On macOS use the clang names; elsewhere use the conda-forge Linux
# cross-compiler toolchain names.
if [ "$(uname)" = "Darwin" ]; then
cc_comp=clang
cxx_comp=clang++
else
cc_comp=x86_64-conda-linux-gnu-gcc
cxx_comp=x86_64-conda-linux-gnu-g++
fi
# Fetch the ASP dependencies. Must keep $tag in sync with build_test.sh. See
# save_mac_deps.sh for how to save and update a tarball with the dependencies.
# New dependencies can be created from scratch with the environment in
# stereopipeline-feedstock.
# Exactly one of the tags below should be active; uncomment the one matching
# the current platform.
tag=asp_deps_mac_x64_v3 # Mac Intel. Sync up tag with build_test.sh.
# tag=asp_deps_mac_arm64_v3 # Mac Arm. Sync up tag with build_test.sh.
# tag=asp_deps_linux_v2 # Linux.
cd $HOME
# Download and unpack the prebuilt dependency tarball; both commands are very
# chatty, so their output is discarded.
wget https://github.com/NeoGeographyToolkit/BinaryBuilder/releases/download/${tag}/asp_deps.tar.gz > /dev/null 2>&1 # this is verbose
/usr/bin/time tar xzf asp_deps.tar.gz > /dev/null 2>&1 # this is verbose
# Set up conda. 'conda init bash' edits ~/.bash_profile, which is then sourced
# so the 'conda' shell function becomes available in this shell.
conda init bash
source ~/.bash_profile
conda activate asp_deps
# Install anaconda client. Will save the anaconda_env client on exit.
conda create -n anaconda -c conda-forge -c defaults -y anaconda-client
# Activate anaconda env
source ~/.bash_profile
conda activate anaconda
# Install conda-build in a separate environment. Do not save it on exit as it
# can have huge partial builds.
conda create -n build -c conda-forge -c defaults -y conda-build
source ~/.bash_profile
conda activate build
# Build ale. It is assumed the compiler is set up as above. May need to save the
# current ~/.ssh/id_rsa.pub key to Github in the user settings for recursive
# cloning of the submodules to work.
cd
git clone https://github.com/DOI-USGS/ale.git --recursive
cd ale
git submodule update --recursive # if refreshing the repo later
#git rebase origin/main
#git reset --hard 0ba7b24
# Install into the asp_deps conda env so later builds (usgscsm, ISIS3) find it.
export PREFIX=$HOME/miniconda3/envs/asp_deps
export PATH=$PREFIX/bin:$PATH
mkdir -p build && cd build
# Use external Eigen and JSON from the conda env; skip docs and tests.
# CMAKE_OSX_DEPLOYMENT_TARGET is a no-op on Linux.
cmake .. \
-DCMAKE_C_COMPILER=${PREFIX}/bin/$cc_comp \
-DCMAKE_CXX_COMPILER=${PREFIX}/bin/$cxx_comp \
-DALE_USE_EXTERNAL_EIGEN=ON \
-DCMAKE_OSX_DEPLOYMENT_TARGET=10.13 \
-DALE_USE_EXTERNAL_JSON=ON \
-DALE_BUILD_DOCS=OFF \
-DALE_BUILD_TESTS=OFF \
-DCMAKE_VERBOSE_MAKEFILE=TRUE \
-DCMAKE_INSTALL_PREFIX=${PREFIX}
# NOTE(review): CPU_COUNT is assumed to be set in the environment -- confirm,
# otherwise this runs as an unbounded 'make -j'.
make -j${CPU_COUNT} install
# Build usgscsm. It is assumed the compiler is set up as above.
cd
git clone https://github.com/DOI-USGS/usgscsm.git --recursive
cd usgscsm
git submodule update --recursive # if refreshing the repo later
#git rebase origin/main
mkdir -p build && cd build
export PREFIX=$HOME/miniconda3/envs/asp_deps
export PATH=$PREFIX/bin:$PATH
# The explicit compiler options are kept commented out below; uncomment them
# (inside the cmake invocation) if the default compiler is not the desired one.
# -DCMAKE_C_COMPILER=${PREFIX}/bin/$cc_comp \
# -DCMAKE_CXX_COMPILER=${PREFIX}/bin/$cxx_comp \
cmake .. \
-DCMAKE_OSX_DEPLOYMENT_TARGET=10.13 \
-DUSGSCSM_EXTERNAL_DEPS=ON \
-DUSGSCSM_BUILD_DOCS=OFF \
-DUSGSCSM_BUILD_TESTS=OFF \
-DCMAKE_VERBOSE_MAKEFILE=TRUE \
-DCMAKE_INSTALL_PREFIX=${PREFIX}
make -j${CPU_COUNT} install
# Build ISIS3
cd
# Pinned compiler package versions keep the toolchain consistent across builds.
conda install -c conda-forge cmake doxygen \
c-compiler=1.7.0 cxx-compiler=1.7.0 \
fortran-compiler=1.7.0
git clone https://github.com/DOI-USGS/ISIS3.git
cd ISIS3
mkdir -p build
cd build
# ISIS expects ISISROOT to point at the build directory.
export ISISROOT=$PWD
#export PREFIX=$HOME/miniconda3/envs/asp_deps
export PREFIX=$CONDA_PREFIX
export PATH=$PREFIX/bin:$PATH
# Record the test-data location both in this shell and in the conda env.
export ISISTESTDATA=$HOME/isis_test_data
conda env config vars set ISISTESTDATA=$ISISTESTDATA
# Shared-library extension for the current platform.
# NOTE(review): ext is set here but not referenced by any command below in this
# section -- possibly leftover from an earlier revision; confirm before removing.
ext=.so
if [ "$(uname)" = "Darwin" ]; then
ext=.dylib
fi
cmake \
-GNinja \
-DJP2KFLAG=OFF \
-Dpybindings=OFF \
-DbuildTests=ON \
-DCMAKE_BUILD_TYPE=Release \
-DBULLET_DEFINITIONS="-DBT_USE_DOUBLE_PRECISION" \
-DOPENCV_INCLUDE_DIR=$PREFIX/include/opencv4 \
-DPCL_INCLUDE_DIR=${PREFIX}/include/pcl-1.15 \
-DCMAKE_INSTALL_PREFIX=$PREFIX \
../isis
#export NINJAJOBS=4; /usr/bin/time ninja install -j $NINJAJOBS # osx
/usr/bin/time ninja install
# Create a tarball with the updated packages. It will be uploaded as an
# artifact. The destination directory is set in the .yml file.
#
# See build_test.sh for how to use this artifact to save the updated packages to
# a permanent location.
mkdir -p ~/work/StereoPipeline/packages
# Tar the whole envs dir (runner-specific path; Mac GitHub runners).
/usr/bin/time tar cfz ~/work/StereoPipeline/packages/asp_deps.tar.gz \
/Users/runner/miniconda3/envs
# See the top of the document for how to save / fetch a tarball with dependencies.
# Done for now. Other packages have been built before.
# Everything below this exit is reference material, not executed.
exit 0
# Must create an ssh key to be able to clone the repos
# ssh-keygen -t rsa
# Add the key /Users/runner/.ssh/id_rsa.pub to github in Settings -> SSH and GPG keys
# Turn on the steps below only if starting from scratch
# (the 'if [ 1 -eq 0 ]' guard keeps this disabled by default).
if [ 1 -eq 0 ]; then
echo Wiping old env
/bin/rm -rf /Users/runner/miniconda3/envs/asp_deps
# Fetch the isis environment file from the ISIS3 repo (dev branch).
/bin/rm -f environment.yml
wget https://raw.githubusercontent.com/DOI-USGS/ISIS3/refs/heads/dev/environment.yml
# Create the asp_deps env
echo Creating a new asp_deps env
conda env create -n asp_deps -f environment.yml
conda activate asp_deps
fi
# Install some needed tools
cd
conda install -c conda-forge -y parallel pbzip2
# Build the needed packages
# geoid
cd
wget https://github.com/NeoGeographyToolkit/StereoPipeline/releases/download/geoid1.0/geoids.tgz
tar xzf geoids.tgz
cd geoids
# Pick the platform-specific shared-library flag and extension.
if [ "$(uname)" = "Darwin" ]; then
LIB_FLAG='-dynamiclib'
EXT='.dylib'
else
LIB_FLAG='-shared'
EXT='.so'
fi
# Build
# NOTE(review): ${FC}, ${FFLAGS}, ${LDFLAGS} are assumed to be set by the
# conda compiler activation scripts -- confirm in a fresh shell.
${FC} ${FFLAGS} -fPIC -O3 -c interp_2p5min.f
${FC} ${LDFLAGS} ${LIB_FLAG} -o libegm2008${EXT} interp_2p5min.o
# Install
mkdir -p ${PREFIX}/lib
cp -fv libegm2008.* ${PREFIX}/lib
# The geoid grids (tif / jp2) are shipped alongside the library.
GEOID_DIR=${PREFIX}/share/geoids
mkdir -p ${GEOID_DIR}
cp -fv *tif *jp2 ${GEOID_DIR}
# libnabo
# Nearest-neighbor library; prerequisite of libpointmatcher below.
cd
export PREFIX=/Users/runner/miniconda3/envs/asp_deps
git clone https://github.com/NeoGeographyToolkit/libnabo.git
cd libnabo
mkdir -p build && cd build
# Point Eigen and Boost at the conda env rather than any system copies.
cmake \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_CXX_FLAGS='-O3 -std=c++11' \
-DCMAKE_C_FLAGS='-O3' \
-DCMAKE_INSTALL_PREFIX=${PREFIX} \
-DEIGEN_INCLUDE_DIR=${PREFIX}/include/eigen3 \
-DCMAKE_PREFIX_PATH=${PREFIX} \
-DBoost_DIR=${PREFIX}/lib \
-DBoost_INCLUDE_DIR=${PREFIX}/include \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_VERBOSE_MAKEFILE=ON \
..
make -j${CPU_COUNT} install
# libpointmatcher
# Point-cloud alignment library; depends on libnabo installed above into
# the same ${PREFIX}.
cd
export PREFIX=$HOME/miniconda3/envs/asp_deps
git clone https://github.com/NeoGeographyToolkit/libpointmatcher.git
cd libpointmatcher
mkdir -p build && cd build
# Boost_NO_SYSTEM_PATHS=ON forces the Boost from the conda env rather than
# any system-wide install. (A duplicate -DCMAKE_VERBOSE_MAKEFILE=ON option
# was removed; it is now passed once.)
cmake \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_CXX_FLAGS="-O3 -std=c++17" \
-DCMAKE_C_FLAGS='-O3' \
-DCMAKE_INSTALL_PREFIX=${PREFIX} \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DCMAKE_PREFIX_PATH=${PREFIX} \
-DBUILD_SHARED_LIBS=ON \
-DEIGEN_INCLUDE_DIR=${PREFIX}/include/eigen3 \
-DBoost_DIR=${PREFIX}/lib \
-DBoost_INCLUDE_DIR=${PREFIX}/include \
-DBoost_NO_BOOST_CMAKE=OFF \
-DBoost_DEBUG=ON \
-DBoost_DETAILED_FAILURE_MSG=ON \
-DBoost_NO_SYSTEM_PATHS=ON \
..
make -j${CPU_COUNT} install
# fgr
# FastGlobalRegistration: built, then header and library copied by hand
# since the project has no install target used here.
# NOTE(review): $SRC_DIR is assumed to be set (conda-build convention) --
# confirm before running standalone.
cd $SRC_DIR
git clone https://github.com/NeoGeographyToolkit/FastGlobalRegistration.git
cd FastGlobalRegistration
FGR_SOURCE_DIR=$(pwd)/source
mkdir -p build && cd build
# Compile flags carry the include dirs and the flann/lz4 link libs.
INC_FLAGS="-I${PREFIX}/include/eigen3 -I${PREFIX}/include -O3 -L${PREFIX}/lib -lflann_cpp -llz4 -O3 -std=c++11"
cmake \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_CXX_FLAGS="${INC_FLAGS}" \
-DCMAKE_INSTALL_PREFIX=${PREFIX} \
-DCMAKE_PREFIX_PATH=${PREFIX} \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DFastGlobalRegistration_LINK_MODE=SHARED \
${FGR_SOURCE_DIR}
make -j${CPU_COUNT}
# Install
FGR_INC_DIR=${PREFIX}/include/FastGlobalRegistration
mkdir -p ${FGR_INC_DIR}
/bin/cp -fv ${FGR_SOURCE_DIR}/FastGlobalRegistration/app.h ${FGR_INC_DIR}
FGR_LIB_DIR=${PREFIX}/lib
mkdir -p ${FGR_LIB_DIR}
/bin/cp -fv FastGlobalRegistration/libFastGlobalRegistrationLib* ${FGR_LIB_DIR}
#s2p
# Build the s2p stereo tools (mgm, msmw, msmw2) and install each as an
# ASP stereo plugin under ${PREFIX}/plugins/stereo/.
cd
export PREFIX=/Users/runner/miniconda3/envs/asp_deps
conda activate asp_deps
conda install -c conda-forge -y fftw=3.3.10
git clone https://github.com/NeoGeographyToolkit/s2p.git --recursive
cd s2p
# update recursive submodules
git submodule update --init --recursive
export CFLAGS="-I$PREFIX/include -O3 -DNDEBUG -march=native"
export LDFLAGS="-L$PREFIX/lib"
# Fix for missing liblzma
#perl -pi -e "s#(/[^\s]*?lib)/lib([^\s]+).la#-L\$1 -l\$2#g" ${PREFIX}/lib/*.la
baseDir=$(pwd)
# Extension
if [ "$(uname)" = "Darwin" ]; then
EXT='.dylib'
else
EXT='.so'
fi
# Build the desired programs
# mgm uses a plain Makefile; prepend our flags to its CFLAGS/LDFLAGS.
cd 3rdparty/mgm
perl -pi -e "s#CFLAGS=#CFLAGS=$CFLAGS #g" Makefile
perl -pi -e "s#LDFLAGS=#LDFLAGS=$LDFLAGS #g" Makefile
make -j${CPU_COUNT}
cd $baseDir
# msmw
cd 3rdparty/msmw
mkdir -p build
cd build
# Point each image library at the conda env copy explicitly.
cmake .. \
-DCMAKE_C_FLAGS="$CFLAGS" -DCMAKE_CXX_FLAGS="$CFLAGS" \
-DPNG_LIBRARY_RELEASE="${PREFIX}/lib/libpng${EXT}" \
-DTIFF_LIBRARY_RELEASE="${PREFIX}/lib/libtiff${EXT}" \
-DZLIB_LIBRARY_RELEASE="${PREFIX}/lib/libz${EXT}" \
-DJPEG_LIBRARY="${PREFIX}/lib/libjpeg${EXT}"
make -j${CPU_COUNT}
cd $baseDir
# msmw2
cd 3rdparty/msmw2
mkdir -p build
cd build
cmake .. \
-DCMAKE_C_FLAGS="$CFLAGS" -DCMAKE_CXX_FLAGS="$CFLAGS" \
-DPNG_LIBRARY_RELEASE="${PREFIX}/lib/libpng${EXT}" \
-DTIFF_LIBRARY_RELEASE="${PREFIX}/lib/libtiff${EXT}" \
-DZLIB_LIBRARY_RELEASE="${PREFIX}/lib/libz${EXT}" \
-DJPEG_LIBRARY="${PREFIX}/lib/libjpeg${EXT}"
make -j${CPU_COUNT}
cd $baseDir
# Install the desired programs
BIN_DIR=${PREFIX}/plugins/stereo/mgm/bin
mkdir -p ${BIN_DIR}
/bin/cp -fv 3rdparty/mgm/mgm ${BIN_DIR}
BIN_DIR=${PREFIX}/plugins/stereo/msmw/bin
mkdir -p ${BIN_DIR}
/bin/cp -fv \
3rdparty/msmw/build/libstereo/iip_stereo_correlation_multi_win2 \
${BIN_DIR}/msmw
BIN_DIR=${PREFIX}/plugins/stereo/msmw2/bin
mkdir -p ${BIN_DIR}
/bin/cp -fv \
3rdparty/msmw2/build/libstereo_newversion/iip_stereo_correlation_multi_win2_newversion \
${BIN_DIR}/msmw2
# libelas (does not work on Mac Arm)
# Builds the 'elas' stereo tool and installs it as an ASP stereo plugin.
cd
env=isis_dev # can also be asp_deps
# The env can live in miniconda3 or anaconda3; glob for it.
export PREFIX=$(ls -d ~/*conda3/envs/$env)
export PATH=$PREFIX/bin:$PATH
conda activate $env
git clone https://github.com/NeoGeographyToolkit/libelas.git
cd libelas
# Set the env
export CFLAGS="-I$PREFIX/include -O3 -DNDEBUG -ffast-math -march=native"
export LDFLAGS="-L$PREFIX/lib"
if [ "$(uname)" = "Darwin" ]; then
EXT='.dylib'
else
EXT='.so'
fi
# build
mkdir -p build
cd build
cmake .. \
-DTIFF_LIBRARY_RELEASE="${PREFIX}/lib/libtiff${EXT}" \
-DTIFF_INCLUDE_DIR="${PREFIX}/include" \
-DCMAKE_CXX_FLAGS="-I${PREFIX}/include"
make -j${CPU_COUNT}
# Copy the 'elas' tool to the plugins subdir meant for it
BIN_DIR=${PREFIX}/plugins/stereo/elas/bin
mkdir -p ${BIN_DIR}
/bin/cp -fv elas ${BIN_DIR}/elas
# Multiview
# Build MultiView from source directly into the asp_deps env.
cd
conda activate asp_deps
export PREFIX=$HOME/miniconda3/envs/asp_deps
conda install -c conda-forge \
rocksdb rapidjson
git clone https://github.com/NeoGeographyToolkit/MultiView.git --recursive
cd MultiView
# Must have ssh authentication set up for github
git submodule update --init --recursive
mkdir -p build && cd build
# NOTE(review): assumes $cc_comp / $cxx_comp were set earlier -- confirm.
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER=${PREFIX}/bin/$cc_comp \
-DCMAKE_CXX_COMPILER=${PREFIX}/bin/$cxx_comp \
-DMULTIVIEW_DEPS_DIR=${PREFIX} \
-DCMAKE_OSX_DEPLOYMENT_TARGET=10.13 \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DCMAKE_MODULE_PATH=$PREFIX/share/pcl-1.13/Modules \
-DCMAKE_CXX_FLAGS="-O3 -std=c++11 -Wno-error -I${PREFIX}/include" \
-DCMAKE_C_FLAGS='-O3 -Wno-error' \
-DCMAKE_INSTALL_PREFIX=${PREFIX}
make -j${CPU_COUNT} install
# PDAL
# Build the PDAL point-cloud library against the conda-env dependencies,
# pinning every third-party library path to ${PREFIX} explicitly.
git clone https://github.com/PDAL/PDAL.git
cd PDAL
git checkout 2.9.3
mkdir -p build
cd build
export PREFIX=$HOME/miniconda3/envs/asp_deps
# Link flags: rpath into the env plus the libs PDAL needs at link time.
ldflags="-Wl,-rpath,${PREFIX}/lib -L${PREFIX}/lib -lgeotiff -lcurl -lssl -lxml2 -lcrypto -lzstd -lz"
if [ "$(uname)" = "Darwin" ]; then
EXT='.dylib'
else
EXT='.so'
# add unwind to ldflags on Linux
ldflags="$ldflags -lunwind"
fi
# Compilers should be auto-detected if the env is activated
# and has both them and cmake installed.
# (Duplicate -DCMAKE_VERBOSE_MAKEFILE, -DWITH_TESTS and -DBUILD_PLUGIN_DRACO
# options were removed; each is now passed once.)
cmake ${CMAKE_ARGS} \
-DBUILD_SHARED_LIBS=ON \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=$PREFIX \
-DCMAKE_PREFIX_PATH=$PREFIX \
-DBUILD_PLUGIN_I3S=OFF \
-DBUILD_PLUGIN_TRAJECTORY=OFF \
-DBUILD_PLUGIN_E57=OFF \
-DBUILD_PLUGIN_PGPOINTCLOUD=OFF \
-DBUILD_PLUGIN_ICEBRIDGE=OFF \
-DBUILD_PLUGIN_NITF=OFF \
-DBUILD_PLUGIN_TILEDB=OFF \
-DBUILD_PLUGIN_HDF=OFF \
-DBUILD_PLUGIN_DRACO=OFF \
-DENABLE_CTEST=OFF \
-DWITH_TESTS=OFF \
-DWITH_ZLIB=ON \
-DWITH_ZSTD=ON \
-DWITH_LASZIP=ON \
-DWITH_LAZPERF=ON \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DCMAKE_CXX17_STANDARD_COMPILE_OPTION="-std=c++17" \
-DCMAKE_EXE_LINKER_FLAGS="$ldflags" \
-DDIMBUILDER_EXECUTABLE=dimbuilder \
-DOPENSSL_ROOT_DIR=${PREFIX} \
-DLIBXML2_INCLUDE_DIR=${PREFIX}/include/libxml2 \
-DLIBXML2_LIBRARIES=${PREFIX}/lib/libxml2${EXT} \
-DLIBXML2_XMLLINT_EXECUTABLE=${PREFIX}/bin/xmllint \
-DGDAL_LIBRARY=${PREFIX}/lib/libgdal${EXT} \
-DGDAL_CONFIG=${PREFIX}/bin/gdal-config \
-DZLIB_INCLUDE_DIR=${PREFIX}/include \
-DZLIB_LIBRARY:FILEPATH=${PREFIX}/lib/libz${EXT} \
-DCURL_INCLUDE_DIR=${PREFIX}/include \
-DPostgreSQL_LIBRARY_RELEASE=${PREFIX}/lib/libpq${EXT} \
-DCURL_LIBRARY_RELEASE=${PREFIX}/lib/libcurl${EXT} \
-DPROJ_INCLUDE_DIR=${PREFIX}/include \
-DPROJ_LIBRARY:FILEPATH=${PREFIX}/lib/libproj${EXT} \
..
make -j${CPU_COUNT} install
# OpenEXR
# This will be removed from ASP
# Build from source, to ensure the proper version of ilmbase is used
wget https://github.com/AcademySoftwareFoundation/openexr/archive/v2.5.5.tar.gz
tar xzfv v2.5.5.tar.gz
cd openexr-2.5.5
mkdir -p build && cd build
conda activate isis_dev
# NOTE(review): if both asp_deps and isis_dev exist, this glob yields two
# paths and the -d test below fails; its exit is commented out -- confirm
# only one env is present when running this.
export PREFIX=$(ls -d ~/*conda3/envs/{asp_deps,isis_dev})
if [ ! -d "$PREFIX" ]; then
echo "Error: $PREFIX does not exist. Exiting."
#exit 1
fi
$PREFIX/bin/cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER=${PREFIX}/bin/$cc_comp \
-DCMAKE_CXX_COMPILER=${PREFIX}/bin/$cxx_comp \
-DCMAKE_PREFIX_PATH=$PREFIX \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DCMAKE_CXX_FLAGS='-O3 -std=c++11 -w' \
-DCMAKE_C_FLAGS='-O3 -w' \
-DCMAKE_INSTALL_PREFIX=${PREFIX} \
-DCMAKE_OSX_DEPLOYMENT_TARGET=10.13
make -j${CPU_COUNT} install
# Build theia
# Configure TheiaSfM against the asp_deps env.
cd ~/work/StereoPipeline
conda install -c conda-forge vlfeat
conda install -c conda-forge rapidjson=1.1.0
conda install -c conda-forge rocksdb=8.5.3 gflags glog ceres-solver mesalib
# On linux, install mesa-libgl-cos6-x86_64
git clone git@github.com:NeoGeographyToolkit/TheiaSfM.git
cd TheiaSfM
mkdir -p build && cd build
export PREFIX=$HOME/miniconda3/envs/asp_deps
if [ ! -d "$PREFIX" ]; then
echo "Error: $PREFIX does not exist. Exiting."
#exit 1
fi
# NOTE(review): only the configure step is shown here; no make/install
# follows before the next section -- confirm whether that is intended.
$PREFIX/bin/cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DMULTIVIEW_DEPS_DIR=${PREFIX} \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DCMAKE_CXX_FLAGS='-O3 -std=c++11 -Wno-error' \
-DCMAKE_C_FLAGS='-O3 -Wno-error' \
-DCMAKE_MODULE_PATH=$PREFIX/share/pcl-1.13/Modules \
-DCMAKE_INSTALL_PREFIX=${PREFIX}
# Build Multiview
# Dependencies. Use precisely same compiler that will be used in the conda recipe
git submodule update --init --recursive
# NOTE(review): vlfeat appears twice in this install line (harmless but
# redundant); confirm and drop one occurrence.
conda install -c conda-forge vlfeat \
gflags=2.2.2 glog=0.7.1 \
ceres-solver=2.2.0 \
vlfeat \
'clang >=16,<17' 'clangxx >=16,<17'
conda install -c conda-forge \
rapidjson=1.1.0 \
rocksdb=8.5.3
git clone git@github.com:NeoGeographyToolkit/MultiView.git
# NOTE(review): there is no 'cd MultiView' after the clone, so 'build' is
# created in the current directory -- confirm this is intended.
mkdir -p build && cd build
# For OSX use a custom location for TBB. This is a fix for a conflict with embree.
# When that package gets updated to version 3 or 4 this may become unnecessary.
opt=""
if [[ $target_platform =~ osx.* ]]; then
opt="-DTBB_LIBRARY=${PREFIX}/lib/libtbb.12.dylib -DTBB_MALLOC_LIBRARY=${PREFIX}/lib/libtbbmalloc.2.dylib"
fi
# Set up the cc_comp compiler as above
# Enforce a compiler we know to work
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_C_COMPILER=${PREFIX}/bin/$cc_comp \
-DCMAKE_CXX_COMPILER=${PREFIX}/bin/$cxx_comp \
-DMULTIVIEW_DEPS_DIR=${PREFIX} \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DCMAKE_CXX_FLAGS='-O3 -std=c++11' \
-DCMAKE_C_FLAGS='-O3' \
-DCMAKE_MODULE_PATH=$PREFIX/share/pcl-1.13/Modules \
-DCMAKE_INSTALL_PREFIX=${PREFIX} \
$opt
# Build
make -j${CPU_COUNT} install
# Make the python env
# A separate python env is needed later for packaging ASP.
echo Creating a new python_isis8 env
/bin/rm -rf /usr/local/miniconda/envs/python_isis8
conda create -n python_isis8 python=3.12.0 numpy=1.26.2 -y
# Build visionworkbench
cd
conda activate asp_deps
# Set up the cc_comp compiler as above
# conda install -c conda-forge openblas
cd ~/work/StereoPipeline
export PREFIX=/Users/runner/miniconda3/envs/asp_deps
git clone https://github.com/visionworkbench/visionworkbench.git
cd visionworkbench
mkdir -p build
cd build
# VW installs straight into the asp_deps env so ASP can find it there.
cmake .. \
-DASP_DEPS_DIR=$PREFIX \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DCMAKE_INSTALL_PREFIX=$PREFIX \
-DCMAKE_C_COMPILER=${PREFIX}/bin/$cc_comp \
-DCMAKE_CXX_COMPILER=${PREFIX}/bin/$cxx_comp
echo Building VisionWorkbench
make -j${CPU_COUNT} install
# Build StereoPipeline
cd
conda activate asp_deps
# Set up the cc_comp compiler as above
export PREFIX=$HOME/miniconda3/envs/asp_deps
# Build from a fresh clone in a separate dir, not the checked-out repo.
cd ~/work
mkdir copy_for_build
cd copy_for_build
git clone https://github.com/NeoGeographyToolkit/StereoPipeline.git
cd StereoPipeline
mkdir -p build
cd build
# VW was installed into the same prefix above.
cmake .. \
-DASP_DEPS_DIR=$PREFIX \
-DCMAKE_VERBOSE_MAKEFILE=ON \
-DCMAKE_INSTALL_PREFIX=$PREFIX \
-DVISIONWORKBENCH_INSTALL_DIR=$PREFIX \
-DCMAKE_C_COMPILER=${PREFIX}/bin/$cc_comp \
-DCMAKE_CXX_COMPILER=${PREFIX}/bin/$cxx_comp
echo Building StereoPipeline
make -j${CPU_COUNT} install > /dev/null 2>&1 # this is too verbose
# Search for packages
conda search -c nasa-ames-stereo-pipeline --override-channels --platform osx-64
# Save current dependencies
# asp_deps.yaml is consumed below by update_versions.py for each feedstock.
cd ~/work/StereoPipeline
conda activate asp_deps; conda env export > asp_deps.yaml
# conda env export > ~/miniconda3/envs/asp_deps/asp_deps.yaml.bk
# To create an env, it appears important to use the flexible channel priority.
# Below creating the final asp_deps env, after ensuring all dependencies are good.
conda config --set channel_priority flexible
conda env create -n asp_deps -f asp_deps.yaml
# See the top of the document for how to save / fetch a tarball with
# dependencies, and for how to install conda-build and the anaconda client.
# geoid
cd ~/work/StereoPipeline
git clone https://github.com/NeoGeographyToolkit/geoid-feedstock.git
# Sync the feedstock's pinned versions with the exported env.
python StereoPipeline/conda/update_versions.py asp_deps.yaml geoid-feedstock
conda activate build
conda build -c nasa-ames-stereo-pipeline -c conda-forge geoid-feedstock
anaconda upload /Users/runner/miniconda3/conda-bld/osx-64/geoid-1.0_asp3.5.0-2.conda
/Users/runner/miniconda3/bin/conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps geoid=1.0_asp3.5.0
# ilmbase
git clone https://github.com/NeoGeographyToolkit/ilmbase-feedstock.git
python StereoPipeline/conda/update_versions.py asp_deps.yaml ilmbase-feedstock
conda build -c conda-forge -c nasa-ames-stereo-pipeline ilmbase-feedstock
~/miniconda3/bin/anaconda upload /Users/runner/miniconda3/conda-bld/osx-64/ilmbase-2.5.5-h01edc0c_1.conda
#conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps ilmbase=2.5.5
/Users/runner/miniconda3/bin/conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps nasa-ames-stereo-pipeline::ilmbase=2.5.5
# NOTE(review): the two lines below upload/install an older _0 build of the
# same package; likely leftovers -- confirm and remove.
/Users/runner/miniconda3/envs/anaconda/bin/anaconda upload /Users/runner/miniconda3/conda-bld/osx-64/ilmbase-2.5.5-h01edc0c_0.conda
/Users/runner/miniconda3/bin/conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps ilmbase=2.5.5
# openexr
cd ~/work/StereoPipeline
# Fetch the feedstock. (The bare URL was missing the 'git clone' invocation.)
git clone https://github.com/NeoGeographyToolkit/openexr-feedstock.git
python StereoPipeline/conda/update_versions.py asp_deps.yaml openexr-feedstock
# Build the package; this step was missing although every other feedstock
# has it and the upload below needs the conda-bld artifact to exist.
conda build -c conda-forge -c nasa-ames-stereo-pipeline openexr-feedstock
# Upload the built package (the word 'upload' was accidentally doubled before).
~/miniconda3/bin/anaconda upload /Users/runner/miniconda3/conda-bld/osx-64/openexr-2.5.5-ha5a8b8e_0.conda
/Users/runner/miniconda3/bin/conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps nasa-ames-stereo-pipeline::openexr=2.5.5
# libnabo
# The pattern for each feedstock below: re-export asp_deps.yaml, clone the
# feedstock, sync its pinned versions, conda-build, upload, then install
# the result back into asp_deps.
cd ~/work/StereoPipeline
conda activate asp_deps; conda env export > asp_deps.yaml
git clone https://github.com/NeoGeographyToolkit/libnabo-feedstock.git
python StereoPipeline/conda/update_versions.py asp_deps.yaml libnabo-feedstock
conda activate build
conda build -c nasa-ames-stereo-pipeline -c conda-forge libnabo-feedstock
~/miniconda3/bin/anaconda upload /Users/runner/miniconda3/conda-bld/osx-64/libnabo-asp3.5.0-h01edc0c_1.conda
~/miniconda3/bin/conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps libnabo
# fgr
cd ~/work/StereoPipeline
git clone https://github.com/NeoGeographyToolkit/fgr-feedstock.git
conda activate asp_deps; conda env export > asp_deps.yaml
python StereoPipeline/conda/update_versions.py asp_deps.yaml fgr-feedstock
conda activate build
conda build -c nasa-ames-stereo-pipeline -c conda-forge fgr-feedstock
anaconda upload /Users/runner/miniconda3/conda-bld/osx-64/fgr-asp3.5.0-h01edc0c_0.conda
~/miniconda3/bin/conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps fgr
# libpointmatcher
cd ~/work/StereoPipeline
conda activate asp_deps; conda env export > asp_deps.yaml
git clone https://github.com/NeoGeographyToolkit/libpointmatcher-feedstock.git
python StereoPipeline/conda/update_versions.py asp_deps.yaml libpointmatcher-feedstock
conda activate build
conda build -c nasa-ames-stereo-pipeline -c conda-forge libpointmatcher-feedstock
anaconda upload /Users/runner/miniconda3/conda-bld/osx-64/libpointmatcher-asp3.5.0-ha5a8b8e_0.conda
~/miniconda3/bin/conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps libpointmatcher
# pdal
cd ~/work/StereoPipeline
conda activate asp_deps; conda env export > asp_deps.yaml
git clone https://github.com/NeoGeographyToolkit/pdal-feedstock.git
python StereoPipeline/conda/update_versions.py asp_deps.yaml pdal-feedstock
# Do not use ASP GDAL so exclude the ASP channel
conda activate build
conda build -c conda-forge pdal-feedstock |tee output_debug_pdal.txt
# s2p
cd ~/work/StereoPipeline
conda activate asp_deps; conda env export > asp_deps.yaml
git clone https://github.com/NeoGeographyToolkit/s2p-feedstock.git
python StereoPipeline/conda/update_versions.py asp_deps.yaml s2p-feedstock
conda build -c nasa-ames-stereo-pipeline -c conda-forge s2p-feedstock
anaconda upload /Users/runner/miniconda3/conda-bld/osx-64/s2p-subset-asp3.5.0-h01edc0c_0.conda
# libelas
cd ~/work/StereoPipeline
conda activate asp_deps; conda env export > asp_deps.yaml
git clone https://github.com/NeoGeographyToolkit/libelas-feedstock.git
python StereoPipeline/conda/update_versions.py asp_deps.yaml libelas-feedstock
conda build -c nasa-ames-stereo-pipeline -c conda-forge libelas-feedstock
~/miniconda3/bin/anaconda upload /Users/runner/miniconda3/conda-bld/osx-64/libelas-asp3.5.0-h01edc0c_0.conda
~/miniconda3/bin/conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps libelas
# Build Multiview with conda. Ensure that the same compile tools
# are used as above.
cd ~/work/StereoPipeline
conda activate asp_deps; conda env export > asp_deps.yaml
git clone https://github.com/NeoGeographyToolkit/multiview-feedstock.git
python StereoPipeline/conda/update_versions.py asp_deps.yaml multiview-feedstock
conda activate build
conda build -c nasa-ames-stereo-pipeline -c conda-forge multiview-feedstock
~/*conda3/bin/anaconda upload /Users/runner/miniconda3/conda-bld/osx-64/multiview-asp_3.5.0-py310_0.conda
~/miniconda3/bin/conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps multiview
# visionworkbench
cd ~/work/StereoPipeline
git clone https://github.com/NeoGeographyToolkit/visionworkbench-feedstock.git
conda activate asp_deps; conda env export > asp_deps.yaml
python StereoPipeline/conda/update_versions.py asp_deps.yaml visionworkbench-feedstock
conda activate build
conda build -c conda-forge -c nasa-ames-stereo-pipeline visionworkbench-feedstock 2>&1 |tee output_debug.txt
# Upload the built package (the word 'upload' was accidentally doubled before).
~/miniconda3/bin/anaconda upload /Users/runner/miniconda3/conda-bld/osx-64/visionworkbench-asp3.5.0-0.conda
~/miniconda3/bin/conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps visionworkbench
# StereoPipeline
conda config --set channel_priority flexible
cd ~/work/StereoPipeline
git clone https://github.com/NeoGeographyToolkit/stereopipeline-feedstock.git
conda activate asp_deps; conda env export > asp_deps.yaml
python StereoPipeline/conda/update_versions.py asp_deps.yaml stereopipeline-feedstock
conda activate build
# ISIS packages come from the usgs-astrogeology channel.
conda build -c nasa-ames-stereo-pipeline -c usgs-astrogeology \
-c conda-forge stereopipeline-feedstock 2>&1 |tee output_debug.txt
# NOTE(review): the two commands below are missing their package arguments
# (placeholders to be filled in with the built .conda file / package name).
~/miniconda3/bin/anaconda upload
~/miniconda3/bin/conda install -c nasa-ames-stereo-pipeline -c conda-forge -n asp_deps
# Prepare for packaging the tarball
conda install -c conda-forge pbzip2 chrpath cmake parallel
conda create -c conda-forge -n python_isis8 python=3.10.13 numpy=1.26.4
# Package with BinaryBuilder. The Mac Arm and Mac x64 use
# different paths to the python environment.
cd ~/work/StereoPipeline
git clone https://github.com/NeoGeographyToolkit/BinaryBuilder
cd BinaryBuilder
export PREFIX=$HOME/miniconda3/envs/asp_deps
export ISISROOT=$PREFIX
installDir=$PREFIX
envPath=$PREFIX
pythonPath=$(ls -d $HOME/miniconda3/envs/*python* | head -n 1)
echo installDir=$installDir
echo envPath=$envPath
echo pythonPath=$pythonPath
./make-dist.py $installDir \
--asp-deps-dir $envPath \
--python-env $(ls -d $HOME/*conda3/envs/python*)
================================================
FILE: .github/workflows/build_helper_linux.yml
================================================
# GitHub Actions workflow: on top of a prefetched asp_deps tarball, build
# the Linux-only pieces (Geoid, ISIS, a Python env), re-tar both envs, and
# upload the result as an artifact. Triggered manually (workflow_dispatch).
name: build_helper_linux
on:
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-22.04
steps:
- uses: conda-incubator/setup-miniconda@v3
with:
auto-activate-base: true
activate-environment: ""
- uses: actions/checkout@v3
- name: Build deps on top of asp_deps
run: |
set -ex
# Fetch the asp_deps tarball (cross-compiled on Mac, has all deps
# except ISIS, Geoid, and Python env)
tag=asp_deps_linux_v2
cd $HOME
wget https://github.com/NeoGeographyToolkit/BinaryBuilder/releases/download/${tag}/asp_deps.tar.gz
/usr/bin/time tar xzf asp_deps.tar.gz -C $HOME
# Find the env
envPath=$(ls -d $HOME/*conda3/envs/asp_deps)
if [ ! -d "$envPath" ]; then
echo "Error: $envPath does not exist"
exit 1
fi
export PATH=$envPath/bin:$PATH
conda init bash
source ~/.bash_profile
conda activate asp_deps
# The tarball was stripped to fit GitHub releases (2GB limit).
# Removed: lib/python3.13/, static .a files, LLVM/clang, OpenVINO,
# openblas (MKL used), conda-meta, rclone, mysqld, locale-archive,
# opt/bullet, include/{bullet,qt6,vtk-9.5}.
# BLAS/LAPACK symlinks redirect to MKL (libmkl_rt.so).
# Reinstall stripped packages needed for ISIS build.
# Pin versions to match environment_asp_deps_linux.yml.
conda install -c conda-forge -y \
"bullet>=3.25" "qt6-main>=6,<7" "openblas" 2>/dev/null || true
# Create the Python env (needed for ASP packaging later).
# Must match the Python version in asp_deps.
conda create -n python_isis9 -c conda-forge -y python=3.13 numpy
# Set up compiler
cc_comp=x86_64-conda-linux-gnu-gcc
cxx_comp=x86_64-conda-linux-gnu-g++
PREFIX=$envPath
# Build Geoid (Fortran, can't cross-compile on Mac)
cd $HOME
wget https://github.com/NeoGeographyToolkit/StereoPipeline/releases/download/geoid1.0/geoids.tgz
tar xzf geoids.tgz
cd geoids
${PREFIX}/bin/x86_64-conda-linux-gnu-gfortran -fPIC -O3 -c interp_2p5min.f
${PREFIX}/bin/x86_64-conda-linux-gnu-gfortran -shared -o libegm2008.so interp_2p5min.o
cp -fv libegm2008.so ${PREFIX}/lib/
mkdir -p ${PREFIX}/share/geoids
cp -fv *.tif *.jp2 ${PREFIX}/share/geoids/
# Build ALE (already cross-compiled in tarball, skip for now)
if [ 1 -eq 0 ]; then
cd $HOME
git clone https://github.com/DOI-USGS/ale.git --recursive
cd ale
mkdir -p build && cd build
cmake .. \
-DCMAKE_C_COMPILER=${PREFIX}/bin/$cc_comp \
-DCMAKE_CXX_COMPILER=${PREFIX}/bin/$cxx_comp \
-DALE_USE_EXTERNAL_EIGEN=ON \
-DALE_USE_EXTERNAL_JSON=ON \
-DALE_BUILD_LOAD=ON \
-DALE_BUILD_DOCS=OFF \
-DALE_BUILD_TESTS=OFF \
-DCMAKE_INSTALL_PREFIX=${PREFIX}
make -j$(nproc) install
fi
# Build USGSCSM (already cross-compiled in tarball, skip for now)
if [ 1 -eq 0 ]; then
cd $HOME
git clone https://github.com/DOI-USGS/usgscsm.git --recursive
cd usgscsm
mkdir -p build && cd build
cmake .. \
-DCMAKE_C_COMPILER=${PREFIX}/bin/$cc_comp \
-DCMAKE_CXX_COMPILER=${PREFIX}/bin/$cxx_comp \
-DUSGSCSM_EXTERNAL_DEPS=ON \
-DUSGSCSM_BUILD_DOCS=OFF \
-DUSGSCSM_BUILD_TESTS=OFF \
-DCMAKE_INSTALL_PREFIX=${PREFIX}
make -j$(nproc) install
fi
# Build ISIS (needs Qt6, Fortran deps - can't cross-compile on Mac)
cd $HOME
git clone https://github.com/DOI-USGS/ISIS3.git
cd ISIS3
git submodule update --init --recursive
mkdir -p build && cd build
export ISISROOT=$PWD
cmake .. \
-GNinja \
-DJP2KFLAG=OFF \
-Dpybindings=OFF \
-DBUILD_TESTING=OFF \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=${PREFIX} \
../isis
/usr/bin/time ninja install
# Tar both envs (asp_deps + python_isis9)
mkdir -p ~/work/StereoPipeline/packages
cd $HOME
condaDir=$(basename $(dirname $envPath))
/usr/bin/time tar czf ~/work/StereoPipeline/packages/asp_deps.tar.gz \
${condaDir}/envs/asp_deps ${condaDir}/envs/python_isis9
# Upload the tarball even if an earlier step failed, for debugging.
- name: Upload artifacts
if: always()
uses: actions/upload-artifact@v4
with:
name: asp-deps-linux-with-isis
path: ~/work/StereoPipeline/packages
retention-days: 2
continue-on-error: true
================================================
FILE: .github/workflows/build_test.sh
================================================
#!/bin/bash
# This is run by GitHub Actions to build and test the Mac version of ASP.
# See build_helper.sh for detailed build commands for ASP and its dependencies.
# Track infrastructure failures separately from test failures.
# Infrastructure failures (build, packaging) are fatal.
# Test validation failures are reported but not fatal.
build_failed=0
# Record the location where the script is running, which should
# be the base of the StereoPipeline repo. This must happen first.
aspRepoDir=$(pwd) # same as $HOME/work/StereoPipeline/StereoPipeline
if [ "$(basename $aspRepoDir)" != "StereoPipeline" ]; then
# Check that base dir is StereoPipeline
echo "Error: Directory: $aspRepoDir is not StereoPipeline"
exit 1
fi
# Other variables
baseDir=$(dirname $aspRepoDir) # one level up
installDir=$baseDir/install
# packageDir will later be uploaded, as set in the yml file
packageDir=$baseDir/packages
testDir=$baseDir/StereoPipelineTest
# Throw an error unless on a Mac
isMac=$(uname -s | grep Darwin)
if [ "$isMac" = "" ]; then
echo "This script is for Mac only"
exit 1
fi
# See if this is Arm64 or Intel x64
isArm64=$(uname -m | grep arm64)
# The ASP dependencies at the location below are updated using the script
# save_mac_deps.sh. See that script for more info. Sometimes the names and
# versions of these change during development.
if [ "$isArm64" != "" ]; then
echo "Platform: Arm64 Mac"
tag=asp_deps_mac_arm64_v3
envName=asp_deps
else
echo "Platform: Intel Mac"
tag=asp_deps_mac_x64_v3
envName=asp_deps
fi
# Fetch and unzip the ASP dependencies
bbUrl=https://github.com/NeoGeographyToolkit/BinaryBuilder/releases/download/${tag}
if [ "$isArm64" != "" ]; then
# ARM64: single tarball
wget ${bbUrl}/asp_deps.tar.gz > /dev/null 2>&1
/usr/bin/time tar xzf asp_deps.tar.gz -C $HOME > /dev/null 2>&1
else
# Intel x64: split into two tarballs (env exceeds 2 GB GitHub limit)
wget ${bbUrl}/asp_deps_p1.tar.gz > /dev/null 2>&1
wget ${bbUrl}/asp_deps_p2.tar.gz > /dev/null 2>&1
/usr/bin/time tar xzf asp_deps_p1.tar.gz -C $HOME > /dev/null 2>&1
/usr/bin/time tar xzf asp_deps_p2.tar.gz -C $HOME > /dev/null 2>&1
fi
# The env can be in miniconda3 or anaconda3
envPath=$(ls -d $HOME/*conda3/envs/${envName})
if [ ! -d "$envPath" ]; then
echo "Error: Directory: $envPath does not exist"
exit 1
fi
export PATH=$envPath/bin:$PATH
# These are of help in interactive mode but are not strictly needed in batch mode
conda init
source ~/.bash_profile
conda activate $envName
# Must use the linker from the conda environment to avoid issues with recent Intel Mac.
# The linker can be installed with conda as package ld64_osx-64 on conda forge.
# Put it in the asp_deps env.
cmake_opts=""
if [ "$isArm64" = "" ]; then
CONDA_LINKER="$(ls $envPath/bin/x86_64-apple-darwin*ld | head -n 1)"
if [ ! -f "$CONDA_LINKER" ]; then
echo "Error: File: $CONDA_LINKER does not exist"
exit 1
fi
ln -sf "$CONDA_LINKER" "$envPath/bin/ld" # Force the use of conda linker
cmake_opts="-DCMAKE_LINKER=$envPath/bin/ld"
fi
# Set up the compiler
# (The Linux branch is kept for symmetry; this script exits earlier if not on Mac.)
if [ "$(uname)" = "Darwin" ]; then
cc_comp=clang
cxx_comp=clang++
else
cc_comp=x86_64-conda-linux-gnu-gcc
cxx_comp=x86_64-conda-linux-gnu-g++
fi
echo cc_comp=$cc_comp
echo cxx_comp=$cxx_comp
# Build visionworkbench
mkdir -p $baseDir
cd $baseDir
git clone https://github.com/visionworkbench/visionworkbench.git
cd visionworkbench
mkdir -p build
cd build
$envPath/bin/cmake .. \
-DASP_DEPS_DIR=$envPath \
-DCMAKE_OSX_DEPLOYMENT_TARGET=10.10 \
-DCMAKE_INSTALL_PREFIX=$installDir \
-DCMAKE_C_COMPILER=${envPath}/bin/$cc_comp \
-DCMAKE_CXX_COMPILER=${envPath}/bin/$cxx_comp \
$cmake_opts
echo Building VisionWorkbench
# First pass: parallel and silenced, since the full output is huge.
make -j10 install > /dev/null 2>&1 # this is too verbose
# Log of the build, for inspection, in case it fails.
# This will resume from earlier.
# Second pass: serial re-run captures a readable log of any remaining work.
out_build_vw=$(pwd)/output_build_vw.txt
make install > $out_build_vw 2>&1
tail -n 500 $out_build_vw
echo Log of VW build will be saved with the artifacts in $(basename $out_build_vw)
# Build StereoPipeline against the VW just installed in $installDir.
cd $aspRepoDir
mkdir -p build
cd build
$envPath/bin/cmake .. \
-DASP_DEPS_DIR=$envPath \
-DCMAKE_OSX_DEPLOYMENT_TARGET=10.13 \
-DCMAKE_INSTALL_PREFIX=$installDir \
-DVISIONWORKBENCH_INSTALL_DIR=$installDir \
-DCMAKE_C_COMPILER=${envPath}/bin/$cc_comp \
-DCMAKE_CXX_COMPILER=${envPath}/bin/$cxx_comp \
$cmake_opts
echo Building StereoPipeline
# First pass: parallel and silenced, since the full output is huge.
make -j10 install > /dev/null 2>&1 # this is too verbose
ans=$?
if [ "$ans" -ne 0 ]; then
echo "Error: StereoPipeline build failed"
# Record the infrastructure failure (previously this was not recorded,
# so a failed build did not affect the reported status).
build_failed=1
# Do not exit so we can save the build log
fi
# Log of the build, for inspection in case it fails.
# Serial re-run resumes from the first pass and captures readable output.
out_build_asp=$(pwd)/output_build_asp.txt
make install > $out_build_asp 2>&1
tail -n 500 $out_build_asp
echo Log of ASP build will be saved with the artifacts in $(basename $out_build_asp)
# Bugfix for duplicate LC_RPATH failure. Wipe all LC_RPATH entries from the
# installed dylibs; DYLD_LIBRARY_PATH is set right after to compensate.
for lib in $installDir/lib/*dylib; do
for f in $(otool -l $lib | grep -A 3 LC_RPATH | grep path | awk '{print $2}'); do
install_name_tool -delete_rpath $f $lib
done
done
# Libraries lost their rpaths above; locate them via DYLD_LIBRARY_PATH.
export DYLD_LIBRARY_PATH=$installDir/lib:$DYLD_LIBRARY_PATH
# Package with BinaryBuilder. The Mac Arm and Mac x64 use
# different paths to the python environment.
echo Packaging the build
cd $baseDir
# Clone BinaryBuilder BEFORE setting DYLD_LIBRARY_PATH to include conda
# libs, as conda's libiconv conflicts with system git.
/usr/bin/git clone https://github.com/NeoGeographyToolkit/BinaryBuilder
cd BinaryBuilder
num=$(ls -d $HOME/*conda3/envs/python* | wc -l)
# Must have exactly one python env
if [ "$num" -ne 1 ]; then
echo "Error: Expected exactly one python env, found $num"
exit 1
fi
export ISISROOT=$envPath # needed for Mac Arm
# Do not add $envPath/lib to DYLD_LIBRARY_PATH. Conda's libiconv and ICU libs
# shadow system frameworks and crash CoreFoundation (Qt6 static init dies with
# "unrecognized selector" in CFStringGetFileSystemRepresentation on Sequoia).
# Use DYLD_FALLBACK_LIBRARY_PATH instead - it only kicks in when rpath/install_name
# lookup fails, so system libs are never shadowed.
export DYLD_FALLBACK_LIBRARY_PATH=$envPath/lib
# Qt6 crashes on macOS 15 during os version check in its static initializer.
# SYSTEM_VERSION_COMPAT=1 tells macOS to report version in a compatible way.
export SYSTEM_VERSION_COMPAT=1
./make-dist.py $installDir \
--asp-deps-dir $envPath \
--python-env $(ls -d $HOME/*conda3/envs/python*)
if [ $? -ne 0 ]; then
echo "Error: make-dist.py failed"
build_failed=1
fi
# Prepare the package for upload
mkdir -p $packageDir
mv -fv Stereo* $packageDir
# Extract the tarball so we can test it
cd $packageDir
tarBall=$(ls StereoPipeline-*.tar.bz2 | head -n 1)
if [ "$tarBall" == "" ]; then
echo Cannot find the packaged ASP tarball
build_failed=1
fi
/usr/bin/time tar xjf $tarBall > /dev/null 2>&1 # this is verbose
# Path to executables
# Derive the extracted dir name by stripping the .tar.bz2 suffix.
binDir=$packageDir/$tarBall
binDir=${binDir/.tar.bz2/}
binDir=$binDir/bin
export PATH=$binDir:$PATH
echo "Binaries are in $binDir"
if [ ! -d "$binDir" ]; then
echo "Error: Directory: $binDir does not exist. Build failed."
build_failed=1
fi
# TODO(oalexan1): Run the tests as a different step in the .yml file.
# Extract the tests. This tarball has the scripts, the test data,
# and the expected results.
# TODO(oalexan1): Must fetch the StereoPipelineTest repo and update
# the scripts extracted from the tarball.
cd $baseDir
echo Testing the build.
wget https://github.com/NeoGeographyToolkit/StereoPipelineTest/releases/download/0.0.1/StereoPipelineTest.tar > /dev/null 2>&1 # this is verbose
# Check if we got the tarball. On failure, record it and continue, so the
# artifacts collected so far are still uploaded.
if [ ! -f "StereoPipelineTest.tar" ]; then
echo "Error: File: StereoPipelineTest.tar does not exist. Test failed."
build_failed=1
fi
tar xfv StereoPipelineTest.tar > /dev/null 2>&1 # this is verbose
# Note: If the test results change, a new tarball with latest scripts and test
# results must be uploaded. That is done by running the script:
# StereoPipeline/.github/workflows/update_mac_tests.sh in the local directory.
# The nightly build script fetches the test data with the latest and reference
# results in tarball StereoPipelineTest.tar. That artifact will be uploaded
# further down.
# Go to the test dir
if [ ! -d "$testDir" ]; then
echo "Error: Directory: $testDir does not exist"
build_failed=1
fi
# A missing $testDir was already recorded above via build_failed; do not
# abort here, so the artifacts are still saved.
cd "$testDir" || true
# Run the tests. Failed to install pytest, despite trying hard.
# Just run them manually.
reportFile=$(pwd)/output_test.txt
rm -f "$reportFile"
ans=0
for d in ss*; do
# Skip unless a directory
if [ ! -d "$d" ]; then continue; fi
cd "$d" || continue
echo Running test in $(pwd)
./run.sh > output.txt 2>&1
# The test verdict is the exit status of validate.sh; run.sh output is
# inspected by validate.sh itself.
./validate.sh >> output.txt 2>&1
ans0=$?
tail -n 20 output.txt # this can be verbose
echo "Test $d returned $ans0"
echo "Test $d returned $ans0" >> "$reportFile"
if [ "$ans0" -ne 0 ]; then ans=1; fi # keep record of failures
cd ..
done
echo ans is $ans
# Set the test status. This is parsed after the build is downloaded.
echo test_status $ans >> "$reportFile"
if [ "$ans" -eq 0 ]; then
echo "All tests passed"
else
# Do not quit, as we want to save the test results
echo "Some tests failed"
fi
# Create the artifacts dir that will be saved
mkdir -p $packageDir
# Save the resulting test results as part of the artifacts. See above for how
# to use this to update the test results in the cloud.
echo Copying the build
(cd $testDir/..; tar cf $packageDir/$(basename $testDir).tar $(basename $testDir))
# Save these logs as part of the artifacts
echo Copying the logs
cp -rfv $out_build_vw $out_build_asp $reportFile $packageDir
# Wipe the extracted tarball so we do not upload it.
# Guard against the case when the tarball was never found: then binDir is
# $packageDir//bin, dirname resolves to $packageDir itself, and an unguarded
# rm -rf would wipe all the artifacts we just prepared for upload.
# TODO(oalexan1): Consider extracting it to a different location to start with
extractedDir=$(dirname "$binDir")
if [ -d "$extractedDir" ] && [ "$extractedDir" != "$packageDir" ]; then
rm -rfv "$extractedDir" > /dev/null 2>&1
fi
# Exit with failure if build/packaging broke (infrastructure failure)
if [ "$build_failed" -ne 0 ]; then
echo "Build or packaging failed (see errors above)"
exit 1
fi
# Exit with test status so CI reports failure when tests fail
exit $ans
================================================
FILE: .github/workflows/build_test_mac_arm64.yml
================================================
name: build_test_mac_arm64
on:
workflow_dispatch:
jobs:
build:
runs-on: macos-latest
steps:
- uses: actions/checkout@v3
# No caching of dependencies, as the cache does not change when it is
# modified, and it can expire. See build_test.sh how to fetch an
# offline tarball with dependencies, and how to update that one if need
# be.
# - name: Cache conda
# id: cache-conda
# uses: actions/cache@v3
# env:
# cache-name: cache-conda
# with:
# path: |
# /usr/local/miniconda/envs
# key: ${{ runner.os }}-build-${{ env.cache-name }}-${{ hashFiles('**/miniconda.json') }}
# restore-keys: |
# ${{ runner.os }}-build-${{ env.cache-name }}-
# ${{ runner.os }}-build-
# ${{ runner.os }}-
- name: build_test
# Build and test ASP
run: ./.github/workflows/build_test.sh
- name: Upload artifacts
if: always()
uses: actions/upload-artifact@v4
with:
# This must be in sync with BinaryBuilder/auto_build
name: StereoPipeline-Artifact-build_test_mac_arm64
path: ~/work/StereoPipeline/packages
retention-days: 2
continue-on-error: true
================================================
FILE: .github/workflows/build_test_mac_x64.yml
================================================
name: build_test_mac_x64
on:
workflow_dispatch:
jobs:
build:
runs-on: macos-15-intel
steps:
- uses: actions/checkout@v3
- name: build_test
# Build and test ASP
run: ./.github/workflows/build_test.sh
- name: Upload artifacts
if: always()
uses: actions/upload-artifact@v4
with:
# This must be in sync with BinaryBuilder/auto_build
name: StereoPipeline-Artifact-build_test_mac_x64
path: ~/work/StereoPipeline/packages
retention-days: 2
continue-on-error: true
================================================
FILE: .github/workflows/save_linux_deps.sh
================================================
#!/bin/bash
# Run this on a local machine to save the current dependencies as a release.
# This will wipe the old version with the same tag. See save_mac_deps.sh for
# more info.
# Check usage
if [ "$#" -lt 1 ]; then
echo "Usage: $0 tag"
exit 1
fi
tag=$1; shift
# Create the tarball of dependencies. This likely includes a pre-built ASP
# itself, but that is not a problem.
# Note: Must update below the names of both envs if they change.
tarball=asp_deps.tar.gz
cd $HOME
echo Creating: $tarball
/usr/bin/time tar cfz $tarball \
miniconda3/envs/asp_deps \
miniconda3/envs/python_isis9
# Set up GitHub CLI
gh=$HOME/miniconda3/envs/gh/bin/gh
repo=git@github.com:NeoGeographyToolkit/BinaryBuilder.git
# Run $gh auth to authenticate
# Wipe any old release with this tag. Pass -y to skip the interactive
# confirmation prompt (matches save_mac_deps.sh); hide the error message
# printed when no such release exists yet.
$gh release -R $repo delete $tag -y 2>/dev/null
# Save the tarball as a release
notes="Full tarball of latest ASP dev build dependencies"
/usr/bin/time $gh release -R $repo create $tag $tarball --title $tag --notes "$notes"
================================================
FILE: .github/workflows/save_mac_deps.sh
================================================
#!/bin/bash
# To update the Mac dependencies or create new ones, in manual (interactive)
# mode, run the ssh_mac_x64.yml / ssh_mac_arm.yml action to get ssh access to a
# Mac cloud instance.
# Then, fetch/build/update the dependencies. See
# https://stereopipeline.readthedocs.io/en/latest/building_asp.html
# for a high-level overview.
# Ideally all dependencies are built and then installed as conda packages.
# The script build_helper.sh has the commands for how build dependencies
# manually, if needed to understand failures when using conda.
# The updated dependencies should be installed in /Users/runner/miniconda3/envs.
# When done, and before exiting, save the dependencies, such as:
# mkdir -p ~/work/StereoPipeline/packages
# cd $HOME
# /usr/bin/time tar cfz ~/work/StereoPipeline/packages/asp_deps.tar.gz *conda3/envs
# After quitting the action (exiting the shell), the tarball will be saved as an
# artifact. Upload progress can be monitored in GitHub Actions.
# Then, from a local machine, which need not be a Mac, run this script.
# It will fetch the tarball from the cloud and then push it as a release
# to permanent location, with given tag.
# This tarball will be used to build VisionWorkplace and ASP. See the script
# build_test.sh.
# The tag set here must match the tag in build_test.sh and build_helper.sh. If
# changing here, must later change in the other places.
# This script will overwrite the dependencies. If in doubt, use it with a new
# tag, as the dependencies are very hard to recreate.
# TODO(oalexan1): For Intel Mac, the env exceeds the 2 GB GitHub release limit
# and must be split into two tarballs (asp_deps_p1.tar.gz and asp_deps_p2.tar.gz).
# This script does not yet handle the split. The build_test.sh script already
# expects two parts for Intel Mac.
# If the tarball with dependencies already exists locally, rather than being
# fetched as an artifact first, the commands from the latter part of
# this script can be run manually to upload the tarball as a release.
# How to run this script:
# For Mac x64:
# tag=asp_deps_mac_x64_xxx # sync up here with build_test.sh
# workflow="ssh_mac_x64.yml"
# $HOME/projects/StereoPipeline/.github/workflows/save_mac_deps.sh $workflow $tag
# For Mac Arm64:
# tag=asp_deps_mac_arm64_xxx # sync up here with build_test.sh
# workflow="ssh_mac_arm.yml"
# $HOME/projects/StereoPipeline/.github/workflows/save_mac_deps.sh $workflow $tag
# For Linux, the dependencies from the local machine can be saved as follows.
# tag=asp_deps_linux_xxx # change here
# $HOME/projects/StereoPipeline/.github/workflows/save_linux_deps.sh $tag
# Check usage
if [ "$#" -lt 2 ]; then
echo "Usage: $0 workflow tag"
exit 1
fi
# The workflow that saved the dependencies as artifact. Options:
# ssh_mac_x64.yml, ssh_mac_arm.yml
workflow=$1; shift
# The tag to use to save the dependencies. Must then use this tag
# to fetch the dependencies in build_test.sh and build_helper.sh.
tag=$1; shift
# The GitHub CLI tool. Can be installed in a new conda environment
# named 'gh' as follows:
# conda create -n gh -c conda-forge gh
gh=$(ls -d $HOME/*conda3/envs/gh/bin/gh)
repo=git@github.com:NeoGeographyToolkit/StereoPipeline.git
# Query the ${workflow}. Must check that the top-most run is successful.
$gh run list -R $repo --workflow=${workflow}
# Find the latest id, then fetch the artifacts for it.
# NOTE(review): this parses the default tabular output of 'gh run list' by
# whitespace-separated column position; if the gh CLI changes its table
# layout, the fields below will shift. Verify after upgrading gh (the
# --json option would be more robust).
ans=$($gh run list -R $repo --workflow=${workflow} | grep -v STATUS | head -n 1)
completed=$(echo $ans | awk '{print $1}')
success=$(echo $ans | awk '{print $2}')
id=$(echo $ans | awk '{print $7}')
echo Stage: $completed
echo Status: $success
echo Id: $id
if [ "$success" != "success" ]; then
echo "Error: The ${workflow} workflow did not succeed"
exit 1
fi
echo Fetching the build with id $id from the cloud
echo $gh run download -R $repo $id
/bin/rm -rf ASP-dependencies-macOS # Must wipe this first, or else the download can fail
$gh run download -R $repo $id
# Must be careful with the line below. This is set in the ${workflow} file.
binaries=ASP-dependencies-macOS/asp_deps.tar.gz
if [ ! -f "$binaries" ]; then
echo "Error: File: $binaries does not exist"
exit 1
fi
# Add the tarball of dependencies as a release
# Can use a new tag here, or overwrite the existing tarball
# If making a new one, must make sure to update the tag in build_test.sh and build_helper.sh
repo=git@github.com:NeoGeographyToolkit/BinaryBuilder.git
# Wipe any old version
echo If present, deleting the old release for tag: $tag
$gh release -R $repo delete $tag -y 2>/dev/null # hide any error message for missing release
# Upload the new version
notes="Full tarball of latest ASP dev build dependencies"
echo Uploading a new version for tag: $tag
/usr/bin/time $gh release -R $repo create $tag $binaries --title $tag --notes "$notes"
================================================
FILE: .github/workflows/ssh_linux_arm.yml
================================================
name: ssh_linux_arm
on:
workflow_dispatch:
jobs:
build:
runs-on: ubuntu-22.04-arm
steps:
# Fetch miniconda
- uses: conda-incubator/setup-miniconda@v3
with:
auto-activate-base: true
activate-environment: ""
# Fetch the code
- uses: actions/checkout@v3
# No caching of dependencies, as the cache does not change when it is
# modified, and it can expire. See build_test.sh how to fetch an offline
# tarball with dependencies, and how to update that one if need be.
# Set up ssh for interactive use
- name: Setup tmate session
uses: mxschmitt/action-tmate@v3 # disconnects after about 40 minutes
timeout-minutes: 600
with:
limit-access-to-actor: true
# Upload the artifacts that were saved in the directory below before exiting
# the ssh session. Useful for saving any results produced in this session.
# Can be later downloaded manually and saved to a permanent location.
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: ASP-dependencies-${{ runner.os }}
path: ~/work/StereoPipeline/packages
retention-days: 2
continue-on-error: true
================================================
FILE: .github/workflows/ssh_mac_arm.yml
================================================
name: ssh_mac_arm
on:
workflow_dispatch:
jobs:
build:
runs-on: macos-latest
steps:
# Fetch miniconda
- uses: conda-incubator/setup-miniconda@v3
with:
auto-activate-base: true
activate-environment: ""
# Fetch the code
- uses: actions/checkout@v3
# No caching of dependencies, as the cache does not change when it is
# modified, and it can expire. See build_test.sh how to fetch an offline
# tarball with dependencies, and how to update that one if need be.
# Set up ssh for interactive use
- name: Setup tmate session
uses: mxschmitt/action-tmate@v3 # disconnects after about 40 minutes
timeout-minutes: 600
with:
limit-access-to-actor: true
# Upload the artifacts that were saved in the directory below before exiting
# the ssh session. Useful for saving any results produced in this session.
# Can be later downloaded manually and saved to a permanent location.
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: ASP-dependencies-${{ runner.os }}
path: ~/work/StereoPipeline/packages
retention-days: 2
continue-on-error: true
================================================
FILE: .github/workflows/ssh_mac_x64.yml
================================================
name: ssh_mac_x64
on:
workflow_dispatch:
jobs:
build:
runs-on: macos-15-intel
steps:
# Fetch miniconda
- uses: conda-incubator/setup-miniconda@v3
with:
auto-activate-base: true
activate-environment: ""
# Fetch the code
- uses: actions/checkout@v3
# Set up ssh for interactive use
- name: Setup tmate session
uses: mxschmitt/action-tmate@v3 # disconnects after about 40 minutes
timeout-minutes: 600
with:
limit-access-to-actor: true
# Upload the artifacts that were saved in the directory below before exiting
# the ssh session. Useful for saving any results produced in this session.
# Can be later downloaded manually and saved to a permanent location.
# See save_mac_deps.sh for how to prepare the dependencies for upload.
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: ASP-dependencies-${{ runner.os }}
path: ~/work/StereoPipeline/packages
retention-days: 2
continue-on-error: true
================================================
FILE: .github/workflows/update_mac_tests.sh
================================================
#!/bin/bash
# Given the latest test results for Mac, as fetched (automatically) from the
# cloud as an artifact, update the reference results and push the updated tests
# back to the cloud. See build_test.sh for how this gets prepared and downloaded
# to the machine that launches the build and regressions.
# The tarball having the tests.
data=StereoPipelineTest.tar
# Check if it exists
if [ ! -f "$data" ]; then
# Maybe it is in a subdir. That happens when fetching the artifact.
# Suppress the ls error printed when there is no match.
ans=$(ls */$data 2>/dev/null | head -n 1)
if [ "$ans" != "" ]; then
cd $(dirname $ans)
fi
fi
# Check again
if [ ! -f "$data" ]; then
echo "Error: File: $data does not exist"
exit 1
fi
# Extract
echo "Extracting $data"
tar xfv $data > /dev/null 2>&1 # this is verbose
if [ ! -d "StereoPipelineTest" ]; then
echo "Error: Directory: StereoPipelineTest does not exist"
exit 1
fi
# Here may need to do some manual inspections
# Update the failed tests (each 'gold' is overwritten with 'run').
# This assumes that the "run" directories are trusted to be correct.
echo "Updating the tests"
for f in StereoPipelineTest/ss*/run; do
# Replace only the trailing /run component. A substring substitution like
# ${f/run/gold} would clobber the first "run" anywhere in the path, e.g.
# in a test directory named ss_run_x.
g=$(dirname $f)/gold
/bin/rm -rfv $g
/bin/mv -fv $f $g
done
# If the scripts need to be updated, do it here, manually
# Must make all scripts in bin and individual tests executable
chmod a+x StereoPipelineTest/bin/* StereoPipelineTest/*/*sh
echo "Creating a new tarball"
tar cfv $data StereoPipelineTest
# Make sure the gh tool is executable
gh=$(ls -d $HOME/*conda3/envs/gh/bin/gh)
if [ ! -x "$gh" ]; then
echo "Error: Cannot find gh"
exit 1
fi
echo "Pushing the updated tarball to the cloud"
repo=git@github.com:NeoGeographyToolkit/StereoPipelineTest.git
tag=0.0.1
echo Wipe the old tests and upload the new ones
$gh release -R $repo delete $tag -y # wipe old tarball
notes="Update test results"
$gh release -R $repo create $tag $data --title $tag --notes "$notes" # upload new
================================================
FILE: .gitignore
================================================
# Generally ignorable files.
*~
/build
/build_linux
/install
/install_linux
docs/_build
GitHubReleases.txt
.vscode
*.code-workspace
================================================
FILE: .readthedocs.yml
================================================
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
# Set the OS, Python version and other tools you might need
build:
os: ubuntu-22.04
tools:
python: "3.11"
# Build documentation in the docs/ directory with Sphinx
sphinx:
configuration: docs/conf.py
# Build documentation with MkDocs
#mkdocs:
# configuration: mkdocs.yml
# Optionally build your docs in additional formats such as PDF and ePub
formats:
- htmlzip
# Optionally set the version of Python and requirements required to build your docs
python:
install:
- requirements: docs/requirements.txt
================================================
FILE: AUTHORS.rst
================================================
The Ames Stereo Pipeline (ASP) was originally developed by the
Intelligent Robotics Group (IRG), in the Intelligent Systems Division
at the NASA Ames Research Center in Moffett Field, CA.
**Project Lead**
- Dr. `Ross Beyer `_ (NASA/SETI Institute)
**Core Development Team**
- Oleg Alexandrov (KBR, Inc., at NASA Ames)
- Scott McMichael (KBR, Inc., at NASA Ames)
**Contributors and Developers**
- Zachary Moratto (NASA/Stinger-Ghaffarian Technologies, former ASP Lead Architect)
- Michael J. Broxton (NASA/Carnegie Mellon University, former ASP Lead Architect
and Project Lead, co-developer of the Vision Workbench)
- Matthew Hancher (NASA, co-developer of the Vision Workbench)
- Dr. Ara Nefian (NASA/Carnegie Mellon University, wrote the Bayes EM subpixel mode
and the early version of Shape-from-Shading)
- Mike Lundy (NASA/Stinger-Ghaffarian Technologies)
- Dr. Laurence Edwards (NASA, former IRG Terrain Reconstruction Lead)
- Monica Palaseanu-Lovejoy (USGS, led the research on the bathymetry module, :numref:`bathy_intro`)
- Dr. Randolph Kirk (USGS, contributed many insights to the shape-from-shading functionality, :numref:`sfs_usage`)
- Yu Tao and Jan-Peter Muller (University College London, contributed the CASP-GO stereo processing system, :numref:`casp_go`)
- Vinh To (NASA/Stinger-Ghaffarian Technologies)
- Kyle Husmann (California Polytechnic State University)
- Sasha Aravkin (Washington State University)
- Aleksandr Segal (Stanford)
- Patrick Mihelich (Stanford University)
- Melissa Bunte (Arizona State University)
- Matthew Faulkner (Massachusetts Institute of Technology)
- Todd Templeton (UC Berkeley)
- Morgon Kanter (Bard College)
- Kerri Cahoy (Stanford University)
- Ian Saxton (UC San Diego)
- Trey Smith (NASA, led the Astrobee and ISAAC projects, which resulted in the rig calibrator,
:numref:`rig_calibrator`)
- `mstyer `_
- `harguess `_
- David Shean (University of Washington)
- Ben Smith (University of Washington)
- Andrew Annex (Johns Hopkins University)
- Joachim Meyer (University of Washington)
- Jay Laura (USGS)
- Shashank Bhushan (University of Washington)
Acknowledgments
---------------
The open source Stereo Pipeline leverages stereo image processing
work, led by Michael J. Broxton (NASA/CMU), Dr. Laurence Edwards
(NASA), Eric Zbinden (formerly NASA/QSS Inc.), Dr. Michael Sims
(NASA), and others in the Intelligent Systems Division at NASA Ames
Research Center. It has benefited substantially from the contributions
of Dr. Keith Nishihara (formerly NASA/Stanford), Randy Sargent
(NASA/Carnegie Mellon University), Dr. Judd Bowman (formerly NASA/QSS
Inc.), Clay Kunz (formerly NASA/QSS Inc.), and Dr. Matthew Deans
(NASA).
The initial adaptation of Ames's stereo surface reconstruction tools to
orbital imagers was a result of a NASA funded, industry led project to
develop automated DEM generation techniques for the MGS mission. Our
work with that project's Principal Investigator, Dr. Michael Malin of
Malin Space Science Systems (MSSS), and Co-Investigator, Dr. Laurence
Edwards of NASA Ames, inspired the idea of making stereo surface
reconstruction technology available and accessible to a broader
community. We thank Dr. Malin and Dr. Edwards for providing the initial
impetus that in no small way made this open source stereo pipeline
possible, and we thank Dr. Michael Caplinger, Joe Fahle and others at
MSSS for their help and technical assistance.
The tools for rig calibration (:numref:`rig_calibrator`), fusion of
points clouds into meshes (:numref:`multi_stereo`), and texturing of
meshes (:numref:`texrecon`), were originally developed as part of the
NASA ISAAC project, with Trey Smith as project manager, and rely
heavily on third-party packages, including Theia SfM, Ceres Solver,
VoxBlox, and MVS Texturing.
We'd also like to thank our friends and collaborators Dr. Randolph
Kirk, Dr. Brent Archinal, Trent Hare, Mark Rosiek, and David Mayer
of the United States Geological Survey's (USGS's) Astrogeology
Science Center in Flagstaff, AZ, for their encouragement and
willingness to share their experience and expertise by answering
many of our technical questions. We also thank them for their ongoing
support and efforts to help us evaluate our work. Thanks also to
the USGS ISIS team, especially Jeff Anderson, Kris Becker, Jay
Laura, and Jesse Mapel, for their help in integrating stereo pipeline
with the USGS ISIS software package.
Thanks go also to Dr. Mark Robinson, Jacob Danton, Ernest
Bowman-Cisneros, Dr. Sam Laurence, and Melissa Bunte at Arizona
State University for their help in adapting the Ames Stereo Pipeline
to lunar data sets including the Apollo Metric Camera.
We'd also like to thank Dr. David Shean, Dr. Ben Smith, and Dr. Ian
Joughin of the Applied Physics Laboratory at the University of
Washington for providing design direction for adapting Ames Stereo
Pipeline to Earth sciences.
Finally, we thank Dr. Ara Nefian, and Dr. Laurence Edwards for their
contributions to this software, and Dr. Terry Fong (IRG Group Lead
during the first decade or so of ASP's existence) for his management
and support of the open source and public software release process.
Portions of this software were developed with support from the
following sources from NASA Science Mission Directorate (SMD) and
Exploration Systems Mission Directorate (ESMD) funding sources:
- Mars Technology Program
- Mars Critical Data Products Initiative
- Mars Reconnaissance Orbiter mission
- Applied Information Systems Research program grant #06-AISRP06-0142
- Lunar Advanced Science and Exploration Research (LASER) program grants
#07-LASER07-0148 and #11-LASER11-0112
- ESMD Lunar Mapping and Modeling Program (LMMP)
- SMD Cryosphere Program
- The Resource Prospector site selection activity
- The VIPER mission site selection activity
- NASA-USGS Interagency Agreement #NNH16AC13I to support the Community
Sensor Model (CSM) work (2019-2021).
- Planetary Data Archiving and Tools program (PDART) grant #19-PDART19_2-0094
under Dr. Ross Beyer (2020-2022).
- NASA-USGS Interagency Agreement #30499, SAA2-403489 to support the satellite-derived
bathymetry work (2020-2021)
- NASA-USGS Interagency Agreement #NNH22OB02A to support interoperability between ISIS
and ASP (2022-)
- Decadal Survey Incubation for global Surface Topography and Vegetation (STV) grant #21-DSI-21-0008 under Dr. David Shean (2022-2025)
- Support for Open Source Tools, Frameworks, and Libraries (OSTFL) grant #20-OSTFL20-0050
under Dr. Ross Beyer (2023-2024)
Any opinions, findings, and conclusions or recommendations expressed in
this documentation are those of the authors and do not necessarily
reflect the views of the National Aeronautics and Space Administration.
================================================
FILE: CMakeLists.txt
================================================
# Top-level CMake configuration for the Ames Stereo Pipeline (ASP).
cmake_minimum_required (VERSION 3.15.5)
project(StereoPipeline)
if(APPLE)
cmake_policy(SET CMP0025 NEW)
# Oldest macOS version the produced binaries are expected to run on.
set(CMAKE_OSX_DEPLOYMENT_TARGET 10.15)
endif(APPLE)
# Make it possible to append to these from the command line
set(CMAKE_CXX_FLAGS "" CACHE STRING "")
set(CMAKE_C_FLAGS "" CACHE STRING "")
# Always build optimized, C++17, with asserts disabled.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3 -std=c++17 -DNDEBUG")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O3")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pthread -lpthread")
# VisionWorkbench cannot be built with latest OpenEXR due to API changes.
# The few places where this library was used were changed to use text files
# instead of .exr files. So we disable OpenEXR support by default.
set(USE_OPENEXR OFF CACHE BOOL "Use EXR (default OFF)")
set(USE_ISIS ON CACHE BOOL "Use ISIS (default ON)")
set(CORE_ASP_ONLY OFF CACHE BOOL "Build only core ASP (default OFF)")
if (APPLE)
# A workaround for the clang included with conda build
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mlinker-version=305")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mlinker-version=305")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -mlinker-version=305")
# Prevent a pedantic error in recent clang
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-enum-constexpr-conversion")
# This a bugfix for spdlog issues
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DFMT_USE_CHAR8_T=0")
else()
# On Linux need to link to additional libraries
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lm -lrt -lgcc_s")
endif()
# Tell cmake to look in the /cmake folder.
list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake")
# Make sure we do a release type
set(CMAKE_BUILD_TYPE "Release")
message("Setting CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}")
# All the actual build targets live under src/.
add_subdirectory(src)
# Install the top-level documentation files alongside the binaries.
install(FILES "AUTHORS.rst" DESTINATION ${CMAKE_INSTALL_PREFIX})
install(FILES "INSTALLGUIDE.rst" DESTINATION ${CMAKE_INSTALL_PREFIX})
install(FILES "LICENSE" DESTINATION ${CMAKE_INSTALL_PREFIX})
install(FILES "NEWS.rst" DESTINATION ${CMAKE_INSTALL_PREFIX})
install(FILES "README.rst" DESTINATION ${CMAKE_INSTALL_PREFIX})
install(FILES "stereo.default.example" DESTINATION ${CMAKE_INSTALL_PREFIX})
install(FILES "THIRDPARTYLICENSES.rst" DESTINATION ${CMAKE_INSTALL_PREFIX})
install(DIRECTORY "examples" DESTINATION ${CMAKE_INSTALL_PREFIX})
# Install only the .rst files as images take too much space
install(DIRECTORY "docs" DESTINATION ${CMAKE_INSTALL_PREFIX}
FILES_MATCHING PATTERN "*.rst")
# Install the plugins directory and the files in it
install(DIRECTORY DESTINATION ${CMAKE_INSTALL_PREFIX}/plugins/stereo)
install(FILES "plugins/stereo/plugin_list.txt"
DESTINATION ${CMAKE_INSTALL_PREFIX}/plugins/stereo)
# Make a directory for wv_correct data and copy that data to it
install(DIRECTORY DESTINATION ${CMAKE_INSTALL_PREFIX}/share/wv_correct)
install(FILES "src/asp/WVCorrect/ms_correction_lookup.txt"
DESTINATION ${CMAKE_INSTALL_PREFIX}/share/wv_correct)
install(FILES "src/asp/WVCorrect/WV02_BAND3_CCD_CORR.tif"
DESTINATION ${CMAKE_INSTALL_PREFIX}/share/wv_correct)
install(FILES "src/asp/Camera/tests/ctx.json"
DESTINATION ${CMAKE_INSTALL_PREFIX}/share/tests)
# Install the default CASP-GO params
install(FILES "src/asp/Gotcha/CASP-GO_params.xml"
DESTINATION ${CMAKE_INSTALL_PREFIX}/share)
================================================
FILE: CODE_OF_CONDUCT.rst
================================================
====================================
Contributor Covenant Code of Conduct
====================================
Our Pledge
----------
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
Our Standards
-------------
Examples of behavior that contributes to a positive environment for our
community include:
- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
- Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
- The use of sexualized language or imagery, and sexual attention or advances of
any kind
- Trolling, insulting or derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email address,
without their explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
Enforcement Responsibilities
----------------------------
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
Scope
-----
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official email address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
Enforcement
-----------
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement listed
as the Project Lead or members of the Core Development Team in the AUTHORS.rst
file.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
Enforcement Guidelines
----------------------
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
1. Correction
~~~~~~~~~~~~~
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
2. Warning
~~~~~~~~~~
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
3. Temporary Ban
~~~~~~~~~~~~~~~~
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
4. Permanent Ban
~~~~~~~~~~~~~~~~
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
Attribution
-----------
This Code of Conduct is adapted from the `Contributor Covenant
<https://www.contributor-covenant.org>`_ version 2.1, available at
https://www.contributor-covenant.org/version/2/1/code_of_conduct.html.
Community Impact Guidelines were inspired by `Mozilla's code of conduct
enforcement ladder <https://github.com/mozilla/diversity>`_.
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
================================================
FILE: CONTRIBUTING.rst
================================================
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
You can contribute to ASP in many ways:
Types of Contributions
----------------------
Report Bugs or Ask for Features via Issues
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We want to hear from you! You can report bugs, ask for new features,
or just raise issues or concerns via logging an `Issue via our GitHub
repo <https://github.com/NeoGeographyToolkit/StereoPipeline/issues>`_.
Fix Bugs or Implement Features
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Look through the GitHub Issues for bugs to fix or features to
implement. If anything looks tractable to you, work on it. Most (if
not all) pull requests should be based on an Issue, so if you're
thinking about doing some coding on a topic that isn't covered in an
Issue, please author one so you can get some feedback while you work
on your pull request.
Write Documentation
~~~~~~~~~~~~~~~~~~~
ASP could always use more documentation, whether as part of the
official docs, on the web in blog posts, articles, and such.
Submit Feedback
~~~~~~~~~~~~~~~
The best way to send feedback is to file an `Issue
<https://github.com/NeoGeographyToolkit/StereoPipeline/issues>`_.
Get Started!
------------
Ready to contribute?
You'll need to follow the instructions for building ASP from source,
which can be found in the INSTALLGUIDE.rst file or the Installation
chapter of the documentation.
1. Fork the `StereoPipeline` repo on GitHub.
2. Clone your fork locally::
$ git clone git@github.com:your_name_here/StereoPipeline.git
3. Create a branch for local development::
$ git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
4. When you're done making changes, check that your changes pass a run
of `make gtest_all` (though note that the unit tests have been
broken recently so this won't work).
5. Commit your changes and push your branch to GitHub::
$ git add .
$ git commit -m "Your detailed description of your changes."
$ git push origin name-of-your-bugfix-or-feature
6. Submit a `pull request
<https://github.com/NeoGeographyToolkit/StereoPipeline/pulls>`_.
Pull Request Guidelines
-----------------------
Before you submit a pull request, check that it meets these guidelines:
1. The pull request should include tests.
2. If the pull request adds functionality, the docs should be updated.
Add the feature to the list in NEWS.rst and potentially update the
README.rst or other documentation files.
What to expect
--------------
Our development of ASP is neither continuous, nor as well-funded as we
might like, and it is entirely possible that when you submit a PR
(pull request), none of us will have the time to evaluate or integrate
your PR. If we don't, we'll try and communicate that with you via the
PR.
For large contributions, it is likely that you, or your employer,
will be retaining your copyrights, but releasing the contributions
via an open-source license. It must be compatible with the Apache-2
license that ASP is distributed with, so that we can redistribute
that contribution with ASP, give you credit, and make ASP even
better! Please contact us if you have a contribution of that nature,
so we can be sure to get all of the details right.
For smaller contributions, where you (or your employer) are not
concerned about retaining copyright (but we will give you credit!),
you will need to fill out a Contributor License Agreement (CLA)
if we plan to accept your PR. The CLA assigns your copyright in
your contribution to NASA, so that our NASA copyright statement
remains true:
Copyright (c) YEAR, United States Government as represented by the
Administrator of the National Aeronautics and Space Administration.
All rights reserved.
There is an `Individual CLA
<https://github.com/NeoGeographyToolkit/StereoPipeline/blob/master/docs/ASP_Individual_CLA.pdf>`_
and a `Corporate CLA
<https://github.com/NeoGeographyToolkit/StereoPipeline/blob/master/docs/ASP_Corporate_CLA.pdf>`_.
ASP People
----------
- An ASP **Contributor** is any individual creating or commenting
on an issue or pull request. Anyone who has authored a PR that was
merged should be listed in the AUTHORS.rst file.
- An ASP **Committer** is a subset of contributors, typically NASA
employees or contractors, who have been given write access to the
repository.
================================================
FILE: ChangeLog
================================================
================================================
FILE: INSTALLGUIDE.rst
================================================
Installation
============
Precompiled binaries are available for the stable releases and the current
development build. Conda packages exist for the stable versions
(:numref:`conda_intro`).
.. _release:
Precompiled binaries
--------------------
Linux
~~~~~
Choose either the latest build (recommended) or a stable release
from:
- `<https://github.com/NeoGeographyToolkit/StereoPipeline/releases>`_
No installation steps or administrative rights are necessary. Extract
the archive, and run the executables in the ``bin`` subdirectory as::
tar xvf StereoPipeline-3.6.0-2025-12-26-x86_64-Linux.tar.bz2
./StereoPipeline-3.6.0-2025-12-26-x86_64-Linux/bin/stereo --help
The result of the last command should be a help message.
To permanently add the ASP executable subdirectory to your PATH, add to your
shell configuration (e.g., ``~/.bashrc``), a line similar to::
export PATH="${PATH}":"/path/to/StereoPipeline/bin"
Then, run ``source ~/.bashrc`` (or open a new terminal) for the changes to take
effect.
The latest additions are documented in :numref:`news`.
MacOS
~~~~~
ASP is available for the Mac Intel and Mac Arm architectures. The latter is
somewhat experimental but was shown to work well in testing. The Mac Arm package
has all ASP logic except the minor ``libelas`` stereo algorithm
(:numref:`libelas`).
The installation steps are the same as for Linux. It is important to
note that:
- An error may be shown about not being able to verify the developers. That can
be overridden in the Privacy & Security settings of the system. Consider using
instead the conda-based installation (:numref:`conda_intro`), which should
not have this issue.
- Running the Intel build the first time will be slow, as Rosetta will
translate the instructions to the native architecture. Subsequent runs will be
faster.
Windows
~~~~~~~
ASP does not offer Windows binaries. However, the Linux build can be run on
Windows using the `Windows Subsystem for Linux
<https://learn.microsoft.com/en-us/windows/wsl/install>`_ (WSL). Once a recent
Linux distribution is installed and verified to work, the installation steps are
the same as for Linux.
.. _conda_intro:
Conda-based installation
------------------------
The ASP 3.6.0 release (December 26, 2025) can be installed via conda, together with
ISIS 9.0.0 (:numref:`planetary_images`) for Linux, Mac Intel, and Mac Arm.
The Mac Arm release is experimental but was tested rather thoroughly. Since USGS
did not release an ISIS version for Mac Arm (as of 12/2025), this is shipped
with an unofficial ISIS Arm conda package, hosted on the
``nasa-ames-stereo-pipeline`` channel. This one lacks the Kakadu JPEG2000
library support. Consider using the Intel release under Rosetta 2 for
mission-critical work.
To install ``conda``, see:
https://docs.conda.io/en/latest/miniconda.html
Make the fetched installation file executable and run it, such as::
chmod u+x ./Miniconda3-latest-Linux-x86_64.sh
./Miniconda3-latest-Linux-x86_64.sh
on Linux, and analogously on OSX. Use the suggested::
$HOME/miniconda3
directory for installation.
Configure the conda channels::
conda config --env --add channels conda-forge
conda config --env --add channels usgs-astrogeology
conda config --env --add channels nasa-ames-stereo-pipeline
Do not skip doing each of these three, even if you think you already
have some of these channels.
Run::
conda config --show channels
to ensure that the order of channels is::
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
*Not having the channels in this order is likely to result in failure to install
ASP.* Do not use the ``defaults`` channel.
Install ASP with the commands::
conda config --set channel_priority flexible
conda create -n asp \
-c nasa-ames-stereo-pipeline \
-c usgs-astrogeology \
-c conda-forge \
stereo-pipeline=3.6.0
This will create a new environment named ``asp`` and install ASP 3.6.0 together
with ISIS 9.0.0 and all other dependencies.
The priority setting is set to ``flexible``, as otherwise conda can get confused
if the same package (even with old versions) exists in more than one channel.
Note that the *latest build* (:numref:`release`) may have more features and
fixes than this official release.
Environment setup
~~~~~~~~~~~~~~~~~
Run::
conda activate asp
and set::
export ISISROOT=$CONDA_PREFIX
in any new shell. These should put the ASP binaries in the path, and will also
initialize various environmental variables, including ``ISISROOT`` and
``PROJ_DATA``.
Check if the ``stereo`` command is found by running::
which stereo
When working with planetary images with ISIS, the ``ISISDATA`` environmental
variable also needs to be set (:numref:`planetary_images`). For more information
see the `ISIS installation instructions
<https://github.com/DOI-USGS/ISIS3#installation>`_.
Alternative approaches
~~~~~~~~~~~~~~~~~~~~~~
Consider using ``mamba`` instead of ``conda`` for the installation, as it is
much faster. (Note that recent ``conda`` distributions default to using the
``mamba`` solver.)
ASP can be installed with Docker (`instructions
`_).
ASP can be built from source (:numref:`building_asp`).
Post-installation
-----------------
The next steps depend on whether it is desired to process planetary (non-Earth),
Earth, or aerial images.
.. _planetary_images:
Planetary images
~~~~~~~~~~~~~~~~
To process images from NASA's spacecraft that are exploring other planets,
install ISIS and its data. Summary of the steps:
#. Fetch ISIS binaries and install, following
https://github.com/DOI-USGS/ISIS3#installation
#. Fetch ISIS data, as detailed at
https://github.com/DOI-USGS/ISIS3#the-isis-data-area
#. Add the ISIS executables to your path:
- bash: ``export PATH="/path/to/isis/bin:${PATH}"``
- csh: ``setenv PATH "/path/to/isis/bin:${PATH}"``
#. Set the ``ISISDATA`` environmental variable to point to where your
ISIS data was downloaded, per the installation link above. For
example, in the ``bash`` shell, this is done as follows::
export ISISDATA="/path/to/isisdata"
Check that you have the directory ``$ISISDATA/base``.
#. Install Stereo Pipeline and set the ``PATH`` variable as above.
#. Try it out. See :numref:`lronac_csm` for a quick Lunar example which does not
require installing ISIS or its supporting data as above,
:numref:`moc_tutorial` for an example using Mars images and ISIS data, and
many other examples in :numref:`examples`.
Earth images
~~~~~~~~~~~~
Processing Earth images is described in the data processing tutorial in
:numref:`dg_tutorial`. See also examples for ASTER (:numref:`aster`), Pleiades
(:numref:`pleiades`), SkySat (:numref:`skysat`), and many more in
:numref:`examples`.
Aerial and historical images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fetch the software as above. Processing images without accurate camera
pose information is described in :numref:`sfm`. See also examples for
declassified satellite images in :numref:`kh4`.
.. _system_rec:
System requirements
-------------------
To run ASP, a computer cluster sharing storage and connected via ssh is needed
(:numref:`pbs_slurm`).
As a rule of thumb, for images on the order of 20,000 x 20,000 pixels, a machine
with 40 GB of RAM and 16 cores could likely produce a terrain model in 4 - 20 hours.
There is a lot of uncertainty here, and much depends on the choice of the stereo
algorithm (:numref:`stereo_alg_overview`), and if mapprojection is employed
(:numref:`mapproj-example`).
1 TB of storage or more is suggested.
Common errors
-------------
Here are some errors you might see, and what they could mean. Treat these
as templates for problems. In practice, the error messages might be
slightly different.
::
Error: **ERROR** Unable to initialize camera model in Camera Factory.
**PROGRAMMER ERROR** Unable to create a shape model from
given target and pvl.
**I/O ERROR** Unable to open [$ISISDATA/].
Stereo step 0: Preprocessing failed
You need to set up your ISIS environment or manually set the correct
location for ``ISISDATA`` (:numref:`planetary_images`).
::
bash: stereo: command not found
You need to add the ``bin`` directory of your deployed Stereo Pipeline
installation to the environmental variable ``PATH``
(:numref:`planetary_images`).
::
/bin/sh: camrange: command not found
You need to add the ``bin`` directory of your ISIS installation to your path (:numref:`planetary_images`).
::
Cache size (500 MB) is larger than the requested maximum cache size
Consider increasing ``--cache-size-mb`` for your program.
This also may be a sign of large input TIF images being stored
in blocks as tall or as wide as the image. The storage scheme of
an image can be examined with the ``gdalinfo -stats`` command,
and an image can be rewritten with square blocks using the command::
gdal_translate -co compress=lzw -co TILED=yes -co INTERLEAVE=BAND \
-co BLOCKXSIZE=256 -co BLOCKYSIZE=256 input.tif output.tif
If the new images are used instead, that warning should go away and
the processing time should go down. Both ``gdalinfo`` and
``gdal_translate`` are included with ASP.
================================================
FILE: LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: NEWS.rst
================================================
Changes since last release
--------------------------
Added a new program for distributed stereo processing across computing nodes
(:numref:`stereo_dist`).
Added a new program to study parameter sensitivity by running stereo on small
patches with different parameter combinations (:numref:`stereo_sweep`).
Added the ``asp_plot`` package for generating diagnostic plots and PDF reports
from ASP outputs (:numref:`asp_plot`).
Added cloud-optimized GeoTIFF (COG) support via the ``--cog`` option to
``point2dem``, ``mapproject``, etc. (:numref:`cog_output`).
Added support for plain-text match files (:numref:`txt_match`).
Added support for SPOT 6 and 7 exact linescan camera models (:numref:`spot67`).
Migrated SPOT 5 to a CSM linescan camera model (:numref:`spot5`). This enables
use of ``jitter_solve`` (:numref:`jitter_pleiades`).
Migrated PeruSat-1 to a CSM linescan camera model (:numref:`perusat1`). This
enables use of ``jitter_solve`` (:numref:`jitter_pleiades`).
For the ASTER camera (:numref:`aster`), removed the ``--aster-use-csm`` option
as that is the default. Removed the use of the helper RPC model for interest
point matching.
sfm_view (:numref:`sfm_view`):
* Brought the source code into the ASP repository from the external
MultiView dependency.
* Input images are no longer required. Only camera files are needed.
aster2asp (:numref:`aster2asp`):
* Added support for ASTER Level 1A V004 HDF format.
bathymetry (:numref:`bathy_intro`):
* Added a program for computing the effective refraction index of water
for a satellite band (:numref:`refr_index`).
* Added documentation on alternative spectral indices for water masking
(:numref:`bathy_water_masking`).
bathy_plane_calc (:numref:`bathy_plane_calc`):
* Refined the algorithm by taking into account that given a mask of land
pixels, the true water interface is somewhere half way between those and the
water pixels.
* Added the option ``--ortho-mask`` to use a georeferenced orthoimage
mask and a DEM to estimate the water plane (:numref:`bathy_plane_ortho_mask`).
* Added the option ``--lon-lat-measurements`` to use a CSV file with
longitude and latitude measurements and lookup heights in a DEM
(:numref:`bathy_plane_lonlat`).
* Removed the option ``--use-ecef-water-surface``. It is now always assumed
that the bathy plane is computed in local projected coordinates.
cam_test (:numref:`cam_test`):
* Added the options ``--bathy-plane`` and ``--refraction-index`` to model
bathymetry correction with ray bending according to Snell's law.
bundle_adjust (:numref:`bundle_adjust`):
* Replaced ``--save-cnet-as-csv`` with ``--save-cnet-as-gcp``. The control
network is now saved after optimization and outlier filtering, with
optimized triangulated point positions, including any input GCP.
* Added bathymetry support (:numref:`ba_bathy`).
jitter_solve (:numref:`jitter_solve`):
* Added ``--save-cnet-as-gcp`` to save the optimized control network in
GCP format, after outlier filtering (:numref:`bagcp`).
ipfind (:numref:`ipfind`):
* Added the option ``--binary-to-txt`` to convert .vwip files to text.
ipmatch (:numref:`ipmatch`):
* Added options for converting match files between binary and text formats
(:numref:`ipmatch_convert`).
* Added an option for merging match files (:numref:`ipmatch_merge`).
image_calc (:numref:`image_calc`):
* Added the option ``--stretch`` to stretch an image and save it with 8-bit
pixels, for visualization purposes (:numref:`image_calc_stretch`).
* For a single input image, the option ``-c`` (calculation operation) defaults
to ``var_0`` (identity operation).
* The default output data type is now ``float32`` (previously ``float64``).
disparitydebug (:numref:`disparitydebug`):
* Added the option ``--raw`` to extract the horizontal and vertical
disparities without normalization (:numref:`raw_disp`).
pc_align (:numref:`pc_align`):
* Print to the terminal and the log file the following initial and final
error stats: Mean, StdDev, RMSE, Median, NMAD.
* Changed the default for ``--diff-rotation-error`` from 1e-8 to 1e-5 degrees,
to avoid slow convergence in some cases due to numerical precision issues.
parallel_stereo (:numref:`parallel_stereo`):
* Added the option ``--proj-win`` to limit stereo to a projection window for
mapprojected images (:numref:`stereodefault`).
* The logic for consolidating the output directory has been given its own
stage (:numref:`entrypoints`).
* The DEM for mapprojected images can be set with ``--dem`` instead of as the
last positional argument (:numref:`mapproj-example`).
sfs (:numref:`sfs`):
* Recommend the variance-based uncertainty estimation (:numref:`sfs_unc`).
* Added the option ``--save-covariances`` (:numref:`sfs_opt`).
rig_calibrator (:numref:`rig_calibrator`):
* Replaced ``--camera-position-weight`` with ``--camera-position-uncertainty``.
* Added the option ``--heights-from-dem`` to constrain triangulated points
relative to a DEM.
* Added an example of an orbital rig with a DEM constraint (:numref:`orbital_rig`).
* All command-line options now use dashes instead of underscores. For example,
``--out_dir`` is now ``--out-dir``.
theia_sfm (:numref:`theia_sfm`):
* Made the SfM reconstruction deterministic by passing a fixed random seed
to TheiaSfM. Previously, results could vary between runs due to time-based
random seeding in RANSAC.
* All command-line options now use dashes instead of underscores. For example,
``--rig_config`` is now ``--rig-config``.
camera_solve (:numref:`camera_solve`):
* Made the SfM reconstruction deterministic by passing a fixed random seed
to TheiaSfM. Previously, results could vary between runs due to time-based
random seeding in RANSAC.
parse_match_file.py (:numref:`parse_match_file`):
* When converting from binary to text, descriptors are no longer saved by
default. Use the ``--save-descriptors`` option to include them.
stereo_gui (:numref:`stereo_gui`):
* The ``--colorize`` and ``--colorbar`` options now work with overlaid and
georeferenced images. Added ``--no-colorize`` (:numref:`colorize`).
mapproject (:numref:`mapproject`):
* Documented compatibility with the upcoming ISIS ``cam2map asp_map=true``
option, which uses the same per-pixel projection algorithm
(:numref:`mapproj_isis`).
* Consistently handle ISIS special pixels in ``.cub`` input images.
Misc:
* Greatly improved the speed of parsing WorldView and SPOT5 XML camera files.
* Removed the unused and undocumented option ``--use-least-squares`` for ray
triangulation in stereo and bundle adjustment.
* Added the option ``--bathy-plane-list`` to supplement ``--bathy-plane``
to various programs.
* Removed the old qi2txt program for reading IceBridge QFIT binary data.
* Removed unused quad-tree tile generators and visualization formats (TOAST,
Celestia, Gigapan, GMap, TMS, Uniview) from VisionWorkbench. Only KML is
kept for ``image2qtree``.
* Do not use more than 128 threads in ``parallel_stereo``, as OpenBLAS
  fails with a higher thread count.
* Removed the undocumented ``dem_mosaic`` options ``--first-dem-as-reference``
and ``--this-dem-as-reference``.
* The ``--t_projwin`` option in ``mapproject`` no longer subtracts one
grid size from the maximum, so the requested extent is fully covered.
* Much removal of templates and breaking up of functions and files into smaller
and better-organized components.
* Removed the unused HDR module, Python SWIG bindings, and legacy tools
(contourgen, blend, bayer, learnpca, doc-generate) from VisionWorkbench.
* Removed the ``grassfirealpha`` tool from VisionWorkbench. Simplified
``image2qtree`` to produce uint8 RGBA output, removing the
``--channel-type`` and ``--terrain`` options.
* Removed the unused ``georef`` tool from VisionWorkbench.
* Removed the ``--preserve-pixel-type`` option from ``undistort_image``.
* Removed the ``slopemap`` tool from VisionWorkbench. Use ``gdaldem slope``
and ``gdaldem aspect`` instead.
* Removed the ``hsv_merge`` tool. Use ``gdaldem color-relief`` with
``gdaldem hillshade`` for similar results.
* Removed the unused ``PixelHSV`` and ``PixelLuv`` pixel types from
VisionWorkbench.
* Updated from Qt5 to Qt6.
RELEASE 3.6.0, December 26, 2025
--------------------------------
DOI: https://zenodo.org/records/18064111
Stable release doc: https://stereopipeline.readthedocs.io/en/stable/index.html
- Available for Linux, Mac Intel, and Mac Arm (the latter is experimental).
- Includes ISIS 9.0.0 (experimental for Mac Arm), GDAL 3.8.1 and PDAL 2.9.3.
camera_solve (:numref:`camera_solve`):
* Works on Mac Arm.
* Removed dependency on OpenImageIO.
parallel_stereo (:numref:`parallel_stereo`):
* Added an example for JunoCam images (:numref:`junocam`).
* Added examples for the Chandrayaan-2 lunar orbiter (:numref:`chandrayaan2`).
* Rewrote the KH-9 example to take into account the improved modeling of
optical bar cameras and a new strategy for fixing local warping
(:numref:`kh9`).
* Added a status file that shows the progress in processing tiles
(:numref:`stereo_diag`).
* Added an output shapefile that shows the tiles being processed
(:numref:`stereo_diag`).
* Left and right alignment matrices are now saved in plain text format. Older
.exr files can no longer be read (:numref:`outputfiles`).
* Erode less at the image boundary during filtering when ``--subpixel-mode`` is
not between 1 and 6 (option ``--edge-buffer-size``,
:numref:`filter_options`).
* Determination of interest point matches is optional when either
``--corr-search`` or ``--seed-mode 2`` are set
(:numref:`stereodefault`).
parallel_sfs (:numref:`parallel_sfs`):
* When albedo and / or haze is modeled, initial estimates for these are
produced for the full site (:numref:`parallel_sfs_usage`).
sfs (:numref:`sfs`):
* Added the option ``--save-variances``, that saves the DEM variance
(and albedo variance, if albedo is floated).
* Added a method for removing seams in the SfS terrain (:numref:`sfs_seams`).
* Improved the documentation for how to measure and repair the misalignment
between the input DEM and output SfS DEM (:numref:`sfs_align_refine`).
* Added a documentation section for how to do registration based on an SfS
terrain to refine the alignment of this SfS DEM and the images
(:numref:`sfs_sim`).
* Options ``--albedo-constraint-weight`` and ``--albedo-robust-threshold`` are
respected when low-resolution initial albedo is estimated (with option
``--estimate-exposure-haze-albedo``).
* Replaced ``--save-computed-intensity-only`` with ``--save-sim-intensity-only``
and ``--save-meas-intensity-only``.
* Renamed ``comp-albedo-final.tif`` to ``albedo-final.tif``, for consistency with
other output files.
* Unused option ``--estimate-slope-errors`` got removed.
parallel_bundle_adjust (:numref:`parallel_bundle_adjust`):
* Bugfix for a crash when there are no interest point matches.
bundle_adjust (:numref:`bundle_adjust`):
* Added the option ``--max-gcp-reproj-err`` to remove GCPs with large
reprojection error.
* Changed the implementation of the camera position constraint
(:numref:`ba_cam_constraints`).
* Added the option ``--fixed-distortion-indices`` to control which lens
distortion parameters are kept fixed.
* The option ``--auto-overlap-params`` accepts an optional third argument (all
in quotes) that has the number of subsequent images that overlap to use in
matching. Also works with option ``--match-first-to-last``.
* For the option ``--mapprojected-data``, the DEM specified at the end is
optional, if it can be looked up from the geoheader of the mapprojected
images.
point2dem (:numref:`point2dem`):
* Added support for LAS COPC files (:numref:`point2dem_las`).
* Added the option ``--gdal-tap``.
* Removed unused options: ``--phi-rotation``, ``--omega-rotation``,
``--kappa-rotation``. A rotation matrix can be applied with ``pc_align``
instead.
pc_align (:numref:`pc_align`):
* Added support for LAS COPC files (:numref:`pc_align_las`).
* The hillshade-based alignment uses by default ``gdaldem hillshade``
(:numref:`pc_hillshade`).
cam_gen (:numref:`cam_gen`):
* Can fit a CSM linescan camera to an OpticalBar camera
(:numref:`opticalbar2csm`).
* Can fit a CSM frame camera model with radial distortion.
* Support pixel pitch that is not just 1 in CSM cameras.
* If the input is an ISIS cube and the output is a CSM camera, save the
ephemeris time, sun position, serial number, and target (planet) name.
* Added the option ``--camera-center-llh``.
mapproject (:numref:`mapproject`):
* Added the option ``--gdal-tap``.
* Bugfix for erosion during mapprojection at DEM boundary when the DEM and
output image have the same grid size.
dem2gcp (:numref:`dem2gcp`):
* Added the options ``--max-num-gcp``, ``--max-disp``, ``--gcp-sigma-image``.
* Added the options ``--image-list``, ``--camera-list``,
``--match-files-prefix``, and ``--clean-match-files-prefix`` to work
with multiple input images and matches.
* Added the option ``--max-pairwise-matches`` to control the maximum
number of matches loaded from any given match file.
jitter_solve (:numref:`jitter_solve`):
* Added documentation on limitations (:numref:`jitter_limitations`).
* Changed the implementation of the camera position constraint
(:numref:`jitter_camera`).
* Added the options ``--fix-gcp-xyz``, ``--use-lon-lat-height-gcp-error``.
* Changed the lens distortion model for ASTER cameras to be radial-tangential.
This produces better results after solving for jitter.
point2las (:numref:`point2las`):
* Added the option ``--dem`` to convert a DEM to LAS.
dem_mosaic (:numref:`dem_mosaic`):
* Added the option ``--gdal-tap``.
camera_footprint (:numref:`camera_footprint`):
* Added the option ``--output-shp`` to save the convex hull of the camera
footprint as a shapefile.
stereo_gui (:numref:`stereo_gui`):
* Added the option ``--zoom-all-to-same-region``.
image_align (:numref:`image_align`):
* Added the option ``--nodata-value``.
* Supports the same controls for interest point detection as
``bundle_adjust`` (:numref:`image_align_opts`).
* The value of option ``--output-prefix`` is now required to be non-empty. It
is set by default to ``out_image_align/run``.
image_subset (:numref:`image_subset`):
* This program is now multi-threaded. Each image is read fully in memory for
speed.
misc:
* Added minimum system requirements for running ASP (:numref:`system_rec`).
* Made the OpticalBar model 3x faster by switching from minimization in 3D
to root-finding in 2D with the Newton-Raphson method.
* Turned off experimental ``--subpixel-mode 6`` as it is failing to run
(:numref:`subpixel_options`).
* Unused ``pca`` mode in ``ipfind`` got removed.
* Bugfix for modifying the creation time of ISIS cubes when it was meant to
only read them.
* Bugfix for when ``parallel_stereo`` has the same value for the output prefix
and bundle adjustment prefix.
* Bugfix for stereo triangulation when the point cloud is huge and has data
only in corners.
RELEASE 3.5.0, April 28, 2025
-----------------------------
DOI: `10.5281/zenodo.15298734 <https://doi.org/10.5281/zenodo.15298734>`_
Stable release doc: https://stereopipeline.readthedocs.io/en/stable/index.html
*New platform*: An experimental native Mac M1/M2 Arm64 build is available
(:numref:`release`).
bundle_adjust (:numref:`bundle_adjust`):
* Replaced the algorithm for creating control networks when there are more
than two images. Notably more features in more than two images now can be
found.
* Added the option ``--save-adjusted-rpc`` to save RPC cameras with adjustments
applied to them (:numref:`rpc_and_ba`).
* Added the option ``--min-distortion`` to ensure small distortion parameters
get optimized.
* Added the option ``--max-triangulation-angle``.
* Compensate for the images in the input nvm being potentially in different
order than the images specified on the command line.
* The report file measuring statistics of registration errors on the ground
got broken up into errors per image and per image pair
(:numref:`ba_mapproj_dem`).
parallel_bundle_adjust (:numref:`parallel_bundle_adjust`):
* The default number of processes per node is 1/4 of the number of cores on
the head node, and the default number of threads per process is the number
of cores on the head node over the number of processes.
* The number of launched jobs is the number of nodes times the number of
  processes per node. This appears best for load balancing.
* Create interest points (before matching) once per image, not each time per
image pair. This speeds up the processing.
mapproject (:numref:`mapproject`):
* If the input DEM is in the ``longlat`` projection, a projection
in meters is auto-determined (:numref:`mapproj_auto_proj`).
* Added the option ``--ref-map`` to borrow the grid size and projection from
an existing mapprojected image (:numref:`mapproj_refmap`).
* Add the option ``--query-pixel``.
jitter_solve (:numref:`jitter_solve`):
* Do two passes by default. This improves the results.
* Can model rig constraints between sensors (:numref:`jitter_rig`).
* Added an example for the Kaguya Terrain Camera (:numref:`jitter_kaguya`).
* Added the option ``--camera-position-uncertainty`` (:numref:`jitter_camera`).
* Can constrain against a sparse point cloud (:numref:`jitter_ref_terrain`).
* Added the option ``--smoothness-weight`` to control high-frequency changes
in the camera orientations in linescan cameras.
* Can use GCP files.
* Can read a control network from an nvm file.
* Write the stereo convergence angles. Can write registration errors on the
ground (:numref:`other_jitter_out`).
stereo_gui (:numref:`stereo_gui`):
* Changing the image threshold updates the display correctly.
* When creating GCP, ask before quitting without saving them. Save the IP as
well when GCP are saved.
* Added the option ``--gcp-sigma`` for creating GCP.
* Big speedup when rendering a stack of georeferenced images.
image_calc (:numref:`image_calc`):
* Added an example for how to extract the horizontal and vertical disparity
bands while setting invalid disparities to a no-data value
(:numref:`mask_disparity`).
sat_sim (:numref:`sat_sim`):
* Added the option ``--rig-sensor-rotation-angles``, to be able to produce
a rig to desired specifications (:numref:`sat_sim_rig_adjust`).
* Can apply a periodic or random perturbation to given cameras
(:numref:`sat_sim_perturb`).
* Added the option ``--blur-sigma``, to blur the simulated images. This can
help simulate the effect of degraded images due to fog, motion, etc.
parallel_stereo (:numref:`parallel_stereo`):
* Added an example of processing Umbra SAR images (:numref:`umbra_sar`).
* Added an example of refining intrinsics and stereo with Chang'e 3 images
(:numref:`change3`).
* The initial low-resolution disparity from a DEM works with mapprojected
images (:numref:`d_sub_dem`).
* Added a discussion of various ways ASP can make use of existing terrain data
(:numref:`existing_terrain`).
* If the number of matches from disparity is much less than requested, try to
find more matches. This usually brings their number into the ballpark.
* The option ``--num-matches-from-disparity`` was made equivalent to
``--num-matches-from-disp-triplets``, and the triplet logic now works
with mapprojected images (:numref:`dense_ip`).
* It is possible to mapproject either with ``dg`` or ``rpc`` cameras
when using mapprojected images in stereo with DigitalGlobe / Maxar
cameras (:numref:`dg-mapproj`).
* Enable stereo with vendor-supplied images that have been mapprojected onto
surfaces of constant height above a datum (:numref:`mapproj_ortho`).
* Added the option ``--band`` to process a given band (channel) from
multispectral images (:numref:`stereodefault`).
* With alignment methods ``none`` and ``epipolar``, the option
``--corr-search`` will work even when interest point matching fails
(:numref:`corr_section`).
* Skip tiles for which there is no valid low-resolution disparity.
* Throw an error if the left and right mapprojected images have different
resolutions, as this can lead to incorrect results.
* Print a warning in ``stereo_pprc`` and ``stereo_tri`` if the stereo
convergence angle is too small.
* Added the options ``--enable-atmospheric-refraction-correction`` and
``--enable-velocity-aberration-correction`` for Pleiades linescan cameras
(these are enabled by default for WorldView cameras only). It is not clear
whether these corrections improve Pleiades accuracy or not.
sfm (:numref:`sfm`):
* Added an example for processing data acquired with a UAS, with known
metadata (:numref:`sfm_uas`).
sfs (:numref:`sfs`):
* Added an SfS example for Earth (:numref:`sfs_earth`).
* Added a CTX Mars example (:numref:`sfs_ctx`).
* Added the program ``image_subset`` for selecting a subset of images that
have almost the same coverage as the full input set
(:numref:`image_subset`).
* Added the option ``--sun-angles`` to specify the Sun azimuth and elevation
angles.
* Bugfix in modeling atmospheric haze.
* Removed the ability to work on multiple clips at once, as it was not used.
* Can ingest a provided albedo map (of same size as the input DEM). Option:
``--input-albedo``.
* Removed the RPC approximation logic. Use instead the option
``--use-approx-camera-models`` with ISIS cameras.
* Removed the option ``--float-cameras``. It is more reliable to optimize
the cameras beforehand, in bundle adjustment.
* Removed obsolete options ``--float-dem-at-boundary``,
``--float-sun-position``, ``--coarse-levels``.
* Have option ``--crop-input-images`` be always on.
pc_align (:numref:`pc_align`):
* Added the Nuth and Kaab algorithm (:numref:`nuth`).
* Added an example of how to use dense image correlation for alignment
(:numref:`pc_corr`).
* Speed up the computation of shared bounding box and loading of source
points.
cam2rpc (:numref:`cam2rpc`):
* When a DEM is passed in, sample not just the DEM surface but its bounding
box, to create a more robust RPC model.
* The produced RPC file has been streamlined to a minimum of metadata.
point2las (:numref:`point2las`):
* Replaced the option ``--triangulation-error-factor`` for saving the triangulation
error as a scaled int with the option ``--save-triangulation-error``, that
saves it in double precision without scaling.
* Added the options ``--save-intensity-from-image`` and ``--save-stddev``.
point2dem (:numref:`point2dem`):
* The default projection for WGS84 is now UTM / polar stereographic.
For other datums it is local stereographic (:numref:`point2dem_proj`).
* Adjust the region passed in via the option ``--t_projwin`` so that, as
usual, the DEM grid coordinates are integer multiples of the grid size.
* Robustly handle invalid input points.
* Remove old options ``--use-surface-sampling`` and ``--fsaa``.
* Bugfix for slow performance with dynamic CRS.
* Changed the default output nodata-value to -1e+6, as the smallest float
may not be displayed accurately by some software.
gcp_gen (:numref:`gcp_gen`):
* Make the interest point matching work better by invoking the full
machinery and options from ``bundle_adjust``.
image_align (:numref:`image_align`):
* Let the default alignment method be ``rigid`` rather than ``translation``.
cam_gen (:numref:`cam_gen`):
* Added the option ``--camera-center``.
* Can export an RPC camera model to .xml format (:numref:`cam_gen_rpc`).
dem_mosaic (:numref:`dem_mosaic`):
* Added the option ``--weight-list`` for blending DEMs given external weights
(:numref:`dem_mosaic_external_weights`).
* Renamed the option ``--dem-list-file`` to ``--dem-list``. The old option
is kept for backward compatibility.
* Can handle DEMs with NaN values.
dem_geoid (:numref:`dem_geoid`):
* Accept a custom geoid correction via ``--geoid-path``. Added support for a
Moon geoid.
orbit_plot (:numref:`orbit_plot`):
* Added the options ``--use-rmse``, ``--output-file``.
isis (:numref:`planetary_images`):
* Upgraded to ISIS 8.3.0.
misc:
* The logic for triangulation with RPC cameras changed (:numref:`rpc_tri`).
* In ``bundle_adjust`` and ``jitter_solve``, save the lists of images and
optimized camera file names (or adjustments). Can be passed in back to
any of these tools (:numref:`ba_out_cams`).
* The option ``--flann-method`` in ``bundle_adjust`` and ``stereo`` defaults to
using the slower but deterministic ``kmeans`` method for a smaller set of
interest points, and to ``kdtree`` otherwise (:numref:`stereodefault-pprc`).
* When creating dense interest point matches from disparity and mapprojected
images, the match file reflects the name of the original unprojected images
(:numref:`dense_ip`).
* Bugfix for a crash with the ``asp_sgm`` and ``asp_mgm`` algorithms when the
disparity search range is large.
* Print the stereo convergence angle in ``stereo_pprc`` with mapprojected
images and with epipolar alignment. These are the remaining cases that were
not handled before.
* The ``mapproject`` and ``parallel_sfs`` programs will not fail if the work
directory has spaces (this fix is a workaround, the bug is in GNU Parallel).
* Renamed ``--csv-proj4`` to ``--csv-srs``. This accepts any GDAL WKT,
GeoJSON, or PROJ string. The previous option is still accepted for backward
compatibility.
* Support images with up to 12 bands (channels), up from 6.
* Support files with the .nitf extension.
* Can handle no-data values larger than valid pixel values.
* Wiped extremely old and unused SPICE logic.
* Wiped the unused old option ``--mask-flatfield``. Can use with stereo
the option ``--nodata-value`` to mask values no more than this value.
* The ``geodiff`` program output image is with float pixels, rather than
in double precision.
* Have the OpenCV interest point detectors respect the ``--threads`` option.
* Have ``bundle_adjust`` and ``parallel_stereo`` use same
``--ip-inlier-factor`` value by default.
* Bugfix for loading camera adjustments when mapprojected images are passed
in, rather than the raw ones.
* Can read Airbus Pleiades RPC XML files that have both a "global" and a
"partial" camera model. The global one will be used.
* Dependence on package ``htdp`` removed. This was needed for
``datum_convert``.
RELEASE 3.4.0, June 19, 2024
----------------------------
*This release is available only as binaries, and not as a conda package*
(:numref:`conda_intro`).
DOI: `10.5281/zenodo.12176190 <https://doi.org/10.5281/zenodo.12176190>`_
Stable release doc: https://stereopipeline.readthedocs.io/en/stable/index.html
New tools:
* Added ``orbit_plot.py`` (:numref:`orbit_plot`), a tool for plotting
camera orientations along an orbit (contributed by Shashank Bhushan).
* Added ``gcp_gen`` (:numref:`gcp_gen`), a program for generating ground
control points (GCP) based on ortho images. Helps create camera models from
scratch.
* Added ``dem2gcp`` (:numref:`dem2gcp`), a tool that can greatly help solve
for lens distortion that manifests itself as large horizontal warping in the
DEM.
New camera support:
* Added the ability to use the CSM camera model with ASTER images
(:numref:`aster_csm`).
New external library support:
* Migrated to PDAL 2.6.0 from libLAS for LAS input/output (in ``point2las``,
``point2dem``, and ``pc_align``), as libLAS is no longer developed.
WorldView (DigitalGlobe) cameras (:numref:`dg_tutorial`):
* The WorldView linescan model got moved to a CSM implementation. The
transitional option ``--dg-use-csm`` was removed. The new implementation is
about 5x faster for ground-to-image projections.
* Re-enabled correcting velocity aberration and atmospheric refraction.
These corrections are now implemented in the CSM camera model, and, unlike
before, play nicely with bundle adjustment (:numref:`dg_csm`).
* The options ``--enable-correct-velocity-aberration`` and
``--enable-correct-atmospheric-refraction`` got removed.
* Non-DG cameras do not use these corrections, as a case for that has not been
made.
jitter_solve (:numref:`jitter_solve`):
* Added an example for ASTER cameras (:numref:`jitter_aster`).
* Added an example with 27 CTX images (:numref:`jitter_multiple_images`).
* Added the option ``--weight-image``, to weigh observations based on
geographic location of triangulated points (:numref:`limit_ip`).
* Can handle several sensors with very similar positions and orientations
(:numref:`jitter_rig`).
* Support reading the ISIS ``jigsaw`` binary control network
format (:numref:`jitter_ip`).
* Can read and write CSM model state embedded in ISIS .cub files
(:numref:`embedded_csm`).
* Replaced the option ``--translation-weight`` with
``--camera-position-weight``, which is off by default, as it may affect the
convergence. The new option adapts appropriately to the number of interest
points and the ground sample distance (:numref:`jitter_camera`).
* The ``--tri-weight`` constraint is now the default, with a positive value of
0.1. This is adjusted for GSD (:numref:`jitter_tri_constraint`).
* Added report files having the change in camera positions
(:numref:`jitter_cam_offsets`), triangulated points
(:numref:`jitter_tri_offsets`), and stats of pixel reprojection errors per
camera (:numref:`jitter_errors_per_camera`).
* Replaced the option ``--heights-from-dem-weight`` with
``--heights-from-dem-uncertainty`` (1 sigma, in meters). This is more
physically meaningful (as a rule of thumb, use the inverse of what was
previously the weight value).
* Integrated the logic behind ``--reference-dem`` into ``--heights-from-dem``,
with an approach that combines the strength of both. Removed
``--reference-dem``.
* Can use anchor points with frame cameras.
* Added ``--num-anchor-points-per-tile``. This helps when different
images have different sizes but want to ensure the same point density.
* Added the option ``--anchor-weight-image`` that is used to limit
where anchor points are placed.
* The roll and yaw constraints no longer assume linescan camera positions and
orientations are one-to-one.
* Order of images in each interest point match file need not be the same
as for input images.
bundle_adjust (:numref:`bundle_adjust`):
* Added the ability to refine the camera intrinsics for several groups of
cameras, with each group sharing intrinsics (:numref:`kaguya_ba`).
* Can mix frame and linescan cameras, while controlling for each
group of cameras which intrinsics should be optimized
(:numref:`ba_frame_linescan`).
* Support reading and writing the ISIS ``jigsaw`` binary control network
format (:numref:`jigsaw_cnet`).
* Can read and write CSM model state embedded in ISIS .cub files
(:numref:`embedded_csm`).
* Support reading and writing the NVM format for control networks
(:numref:`ba_nvm`).
* Added the option ``--camera-position-weight``, with a default value of 0.0.
This is an internally adjustable constraint to keep the cameras from moving
too much. It may prevent the reduction in reprojection error
(:numref:`ba_cam_constraints`).
* Remove the option ``--translation-weight``. The translation is now
automatically controlled by default by the camera position weight.
* Added the option ``--camera-position-uncertainty`` to set hard constraints
on the horizontal and vertical uncertainty for each camera
(:numref:`ba_cam_constraints`).
* Added report files having the change in camera positions
(:numref:`ba_camera_offsets`) and triangulated points
(:numref:`ba_tri_offsets`).
* The option ``--tri-weight`` is now set by default to 0.1, and adjusted for
GSD. The option ``--camera-weight`` is by default 0.0. This works better
than before at preventing the cameras from moving when optimizing them.
* Replaced the option ``--heights-from-dem-weight`` with
``--heights-from-dem-uncertainty`` (1 sigma, in meters). This is more physically
meaningful (as a rule of thumb, use the inverse of what was previously the
weight value).
* Integrated the logic behind ``--reference-dem`` into ``--heights-from-dem``,
with an approach that combines the strength of both. Removed
``--reference-dem``.
* Added the option ``--propagate-errors`` to propagate the uncertainties from
input cameras to triangulated points (:numref:`ba_error_propagation`).
* Added the option ``--weight-image``, to weigh observations based on
geographic location of triangulated points. (:numref:`limit_ip`).
* For ASTER cameras, use the RPC model to find interest points. This does
not affect the final results but is much faster.
* When optimizing intrinsics, cameras that do not share distortion can
have different distortion types and sizes. (:numref:`limit_ip`).
* Each image passed to ``--mapprojected-data`` reads from its geoheader
the camera and adjustment prefix for undoing the mapprojection.
* Fixed a bug when both ``--initial-transform`` and
``--input-adjustments-prefix`` are used.
* Can use the image names in ``--camera-list`` when images contain the camera
models.
* The pixel reprojection errors are adjusted correctly for pixel sigma in
the report files (:numref:`ba_errors_per_camera`, :numref:`ba_err_per_point`).
* The default outlier removal parameters are more generous, to avoid removing
valid interest point matches when the input images have distortion (option
``--remove-outliers-params``).
* The combination of options ``--mapprojected-data`` and
``--auto-overlap-params`` will restrict the interest point matching to the
region of overlap (expanded by the percentage in the latter option). This
can result in great efficiency gains for large images.
* Made the Tsai lens distortion agree precisely with OpenCV's implementation
(:numref:`pinholemodels`). There was a small numerical problem and the K3
coefficient was not part of the distortion model.
* Replaced the Tsai lens undistortion implementation, for a 10x speedup.
* Added the OpenCV fisheye lens distortion model and also the FOV model
(:numref:`pinholemodels`). These are for wide-angle lenses.
* Bugfix: points for which initial triangulation failed are flagged as
outliers right away. See ``--forced-triangulation-distance`` for
fine-grained control.
* Order of images in each previously created interest point match file need
not be the same as for input images.
* RPC lens distortion is now applied to pixels that are normalized by focal
length, in addition to being offset by the principal point. This is
consistent with the radial-tangential and fisheye models, and produces a
more accurate fit to other models. *Previously created models are now
invalid*.
* RPC undistortion is now done with a solver rather than using separate
undistortion coefficients. This is much more accurate but slower
(:numref:`pinholemodels`).
* Added an example of using RPC distortion for KH-7 cameras, for which
an exact model is not available (:numref:`kh7_fig`).
* Ensure that outlier filtering with ``--min-triangulation-angle`` is done
after each pass with refined cameras and for all ways of reading a control
network.
* Load and save the camera models in parallel, for speed (except for ISIS).
* Bugfix: if some intrinsics are shared, sync them up before optimization.
parallel_stereo (:numref:`parallel_stereo`):
* Added Kaguya processing example (:numref:`kaguya_tc`).
* When a run finishes successfully, combine the data from subdirectories and
delete these. See ``--keep-only`` for more options.
* Made the tiles for the ``asp_mgm`` / ``asp_sgm`` algorithms bigger, with
smaller padding, which should be about 2x faster (:numref:`ps_tiling`).
* Added an illustration of several stereo algorithms (:numref:`stereo_alg_fig`).
* Fixed a failure when processing images that have very large blocks (on the
order of several tens of thousands of pixels along some dimension, as shown
by ``gdalinfo``). A warning, progress bar, and timing info are displayed.
* For the ``asp_sgm`` and ``asp_mgm`` algorithms allow ``cost-mode`` to
have the value 3 or 4 only, as other values produce bad results.
* Fix a failure when the working directory has a space in its name.
* Bugfix for memory usage with very large images.
point2dem (:numref:`point2dem`):
* Added the option ``--auto-proj-center``, to automatically compute the
projection center for stereographic and other projections
(:numref:`point2dem_proj`).
* When the lon-lat projection is used, the output DEM longitude range
is always in [-180, 180], unless using [0, 360] results in a smaller range
(such as when crossing the 180 degree meridian).
* Added the option ``--scalar-error`` to find the norm of the triangulated
error vector (if applicable).
* Can read a ground-level point cloud stored as a tif file with 3 bands,
representing the x, y, and z coordinates of the points, with z being
vertical (option ``--input-is-projected``).
* Bugfix for when all heights are equal. A valid DEM is produced.
* Do not assume the datum is WGS84 by default, as this can result in
incorrect DEMs. The datum, projection, or semi-axes must be set
(or read from the input PC/LAS file).
gdal (:numref:`gdal_tools`):
* Full support for WKT and GeoJSON for the projection string (option
``--t_srs``) in ``point2dem``, ``point2las``, ``mapproject``,
``dem_mosaic``, ``cam2rpc``. Can still use PROJ.4 strings.
* Georeferenced images with different datums cannot be used together. Use
``gdalwarp`` to convert them to a common datum.
* Upgraded to GDAL 3.8.0 and PROJ 9.3.0.
csm (:numref:`csm`):
* Upgraded to USGSCSM 2.0.1.
* Fixed several problems in generation of CSM cameras for MSL Curiosity Nav
and Mast images. Much large-scale testing was performed. Updated the
example showing how to create stereo from either Nav or Mast stereo pairs
(:numref:`csm_msl`).
* A multi-Martian-day example for MSL added (:numref:`csm_msl_multiday`).
* Added support for the radial and tangential distortion model
with 3 radial distortion parameters and 2 tangential ones. Tested
that it agrees with the OpenCV implementation.
* Fixed a small bug in radial distortion implementation.
stereo_gui (:numref:`stereo_gui`):
* Can show scattered data with a colorbar and axes
(:numref:`scattered_points_colorbar`).
* Renamed ``--colorize-image`` to ``--colorbar``.
* Right-click on a colorized image to set the range of intensities to
colorize.
* Can view ISIS control network files (:numref:`stereo_gui_isis_cnet`).
* Auto-guess and load ``pc_align`` error files (:numref:`pc_align_error`).
* When loading an .nvm file with features that are not shifted relative
to the optical center, must specify ``--no-shift``. This avoids confusion
as to whether a shift is present or not (:numref:`stereo_gui_nvm`).
colormap (:numref:`colormap`):
* Added the option ``--hillshade`` to create a hillshaded colormap.
image_calc (:numref:`image_calc`):
* When adding new keywords to metadata geoheader, do not erase the existing
ones (if a keyword already exists, its value will be modified).
* Added the ability to create a random image.
pc_align (:numref:`pc_align`):
* Add the option ``--skip-shared-box-estimation``.
historical_helper.py (:numref:`historical_helper`):
* Added the ability to set a custom path to the needed ``convert``
executable and described how that tool can be installed.
sfs (:numref:`sfs`):
* Added two examples for Kaguya TC, for single and multiple illumination
conditions (:numref:`sfs_kaguya`).
* Added the option ``--albedo-robust-threshold``.
isis (:numref:`moc_tutorial`):
* The ISIS libraries are compiled from source, and reflect the code after
the ISIS 8.0.3 release (:numref:`conda_intro`).
* Made the operation of projecting into an ISIS linescan camera 2.2-2.6 times
faster by using the secant method to find the best sensor line.
* Expanded the ``jigsaw`` documentation (:numref:`jigsaw`). This is the
ISIS bundle adjustment tool.
cam_gen (:numref:`cam_gen`):
* Can fit a CSM frame camera to a given input camera, including distortion
(:numref:`cam_gen_frame`).
* Can export linescan cameras to CSM format (:numref:`cam_gen_linescan`).
* Can create cameras given longitude, latitude, height above datum, and roll,
pitch, yaw angles (:numref:`cam_gen_extrinsics`).
rig_calibrator (:numref:`rig_calibrator`):
* Can export the interest point matches, cameras, and the OpenCV lens
distortion model for use with ``bundle_adjust`` (:numref:`rc_ba`).
* Added documentation for how to register the produced cameras to the ground
for a planet (:numref:`msl_registration`).
* Can fix the translation and/or rotation component of a rig configuration.
* Can constrain camera positions with ``--camera_position_weight``.
* Added two more naming conventions, to help process existing data
out-of-the-box. Also for ``theia_sfm`` and ``sfm_merge``. See
:numref:`rig_data_conv`.
* Thoroughly validated with an orbital rig (in addition to indoor rigs).
lronac2mosaic.py (:numref:`lronac2mosaic`):
* Run ``spiceinit`` before calling ``lronaccal``, and re-enable all
options for the latter command, which were disabled due to a bug
in ISIS that was fixed in version 7.2.
* Invoke ``spiceinit`` with ``spksmithed=true``.
* Add the option ``--spiceinit-options``.
camera_solve (:numref:`camera_solve`):
* Switched to cascade matching from brute force matching, which is much faster.
* Always reuse the Theia SfM matches.
dem_mosaic (:numref:`dem_mosaic`):
* Bugfix for option ``--use-centerline-weights``.
misc:
* Made all tools that spawn processes in parallel use the option
``--parallel-options``, with default ``--sshdelay 0.2``, to avoid
failure on certain architectures.
* For ASTER (:numref:`aster`), the model loaded by default is now linescan
rather than RPC.
* Fixed a bug in outlier filtering when the interest points are very noisy.
* Fixed a couple of runtime errors when using conda packages on OSX.
* Eliminated a procedure for cleaning the name of an input path that was
replacing two slashes with one slash, resulting in inconsistencies.
* Robustly handle 360 degree longitude offsets without classifying
georeferenced images into [-180, 180] or [0, 360] types.
* Fix an error in conversion between projections for non-Earth images.
* The North-East-Down coordinate system assumes an ellipsoid, not a sphere,
and takes into account the point elevation. This fixes some small
inaccuracies in error propagation and reporting in ``pc_align``.
* The OSX build now gets created and tested via GitHub Actions.
* Very old jitter adjustment logic was removed. The ``jitter_solve``
tool must be used instead.
* For stereo, increased ``--ip-num-ransac-iterations`` from 100 to 1000,
as for ``bundle_adjust``. This can make a difference for noisy data.
* Do not keep auxiliary files with temporary names in the work directory for
``parallel_stereo`` and ``parallel_bundle_adjust``. Use run's output
directory and proper names.
* Ensure any sequence of quaternions in a CSM linescan model is normalized
and there is no sign flip along the sequence. Such a flip was resulting
in incorrectly interpolated camera orientations.
* Auto-guess the planet for Pinhole cameras (for Earth, Moon, Mars).
* Documented the program ``view_reconstruction``
(:numref:`view_reconstruction`), with a figure.
* Switched by default to a slower but deterministic method for matching
interest points in ``bundle_adjust`` and ``parallel_stereo``. Normally this
is not a bottleneck. See ``--flann-method`` in :numref:`stereodefault-pprc`.
* Made RANSAC multi-threaded. This speeds up interest point matching.
* Added a sanity check: If the user sets ``--t_srs`` to any tool, it
must not be empty.
* Added sanity checks to ensure no mix-up of datums from different planets in
different inputs for the stereo tools, ``bundle_adjust``, ``jitter_solve``,
``mapproject``, ``cam_test``, and ``cam_gen``.
* Upgraded to Boost 1.82.0.
RELEASE 3.3.0, August 16, 2023
------------------------------
DOI: `10.5281/zenodo.8270270 <https://doi.org/10.5281/zenodo.8270270>`_
Stable release doc: https://stereopipeline.readthedocs.io/en/stable/index.html
New tools:
* Added ``sfm_merge`` (:numref:`sfm_merge`), a tool to merge several
SfM reconstructions that may or may not have shared images.
* Added ``sfm_submap`` (:numref:`sfm_submap`), a tool to extract
a submap from a Structure-from-Motion map in .nvm format,
as produced by ``theia_sfm`` (:numref:`theia_sfm`) or refined
with ``rig_calibrator`` (:numref:`rig_calibrator`).
* Added a couple of small Python scripts for handling ROS bags
(:numref:`ros_tools`). No ROS binaries are shipped.
* Added support for the Pleiades NEO exact linescan model
(:numref:`pleiades_neo`).
* Added ``sat_sim`` (:numref:`sat_sim`), a tool to create simulated
satellite images and camera models for pinhole or linescan sensors.
* Added ``sfm_view`` (:numref:`sfm_view`), a tool for viewing orbital
Pinhole camera models. It is a modification of the ``umve`` program.
csm (:numref:`csm`):
* Added initial support for using CSM camera models with MSL Curiosity
(:numref:`csm_msl`).
parallel_stereo (:numref:`parallel_stereo`):
* Can propagate horizontal ground plane standard deviations (stddev)
specified for each camera through triangulation, obtaining the
horizontal and vertical stddev for each triangulated point.
For DigitalGlobe RPC and Pleiades linescan cameras the input
horizontal stddev can be read from camera files. A formula to go
from known CE90 to input horizontal stddev is provided
(:numref:`error_propagation`).
* Can propagate the covariances of satellite positions and
quaternions to the triangulated point cloud for Maxar
(DigitalGlobe) linescan cameras (:numref:`error_propagation`).
* Documented the pre-processing options ``--stddev-mask-kernel``
and ``--stddev-mask-thresh``
(:numref:`stereodefault-pprc`). Fixed a bug in writing
out debug images for this option.
* The cameras files used in mapprojection can be switched to other
ones for the purpose of doing stereo or reusing a stereo run
(:numref:`mapproj_reuse`).
* Added the option ``--matches-per-tile``, to attempt to guarantee
that each 1024 x 1024 tile has about this many matches.
* Bugfix for stereo with mapprojected Pleiades images. If the
mapprojection is done with the exact (non-RPC) cameras, stereo
must load the exact cameras when undoing the mapprojection.
bundle_adjust (:numref:`bundle_adjust`):
* Validated that given about a thousand input images acquired with three views
and much overlap, one can have very tight control of the cameras in bundle
adjustment, while improving the self-consistency of the camera configuration
(:numref:`skysat_stereo`).
* Validated the option ``--reference-dem`` for bundle adjustment. This works
even when rays obtained during triangulation are parallel.
* Added the option ``--matches-per-tile``, to attempt to guarantee that each
1024 x 1024 tile has about this many matches (example in
:numref:`ba_examples`).
* Bugfix for slow performance and memory usage for a large number of images.
jitter_solve (:numref:`jitter_solve`):
* Added the options ``--roll-weight`` and ``--yaw-weight`` to control the
amount of change in these angles relative to the along-track direction. To
be used with synthetic cameras created with ``sat_sim`` (:numref:`sat_sim`).
* Added a section discussing solving for jitter with synthetic camera models
(:numref:`jitter_sat_sim`).
* The solver can mix and match linescan and pinhole (frame) camera images if
the inputs are all in the CSM format (:numref:`jitter_linescan_frame_cam`).
* Added a section on how to prepare interest point matches
(:numref:`jitter_ip`).
* Validated the option ``--reference-dem`` for bundle adjustment. This works
even when rays obtained during triangulation are parallel.
* Bugfix for reverse scan direction.
* Added an example for Pleiades cameras (:numref:`jitter_pleiades`),
comparing two ways of setting ground constraints.
sfs (:numref:`sfs`):
* Created an SfS DEM of size 14336 x 11008 pixels, at 1 m pixel with
420 LRO NAC images with various illuminations and orientations.
Reliably bundle-adjusted 814 LRO NAC images in which the shadows
were observed to make a full 360 degree loop, with a total of 614k
triangulated points. Updated the documentation reflecting
latest best practices (:numref:`sfs-lola`).
* Create more detail in the reconstructed DEM in borderline lit
regions. Option: ``--allow-borderline-data``
(:numref:`sfs_borderline`).
* Added the options ``--image-list`` and ``--camera-list`` for when
the number of images becomes too large to set on the command line.
rig_calibrator (:numref:`rig_calibrator`):
* Added a detailed tutorial describing how this tool was used to
create a full 360-degree textured mesh of the JEM ISS module
(:numref:`sfm_iss`) using data acquired with two rigs (6 sensors).
* Added an example for the MSL Curiosity rover (:numref:`rig_msl`).
* Allow multiple rigs to be jointly optimized (the rig constraint
applies within individual rigs and not between them).
* Added the option ``--extra_list`` to insert additional images
close in time to some of the images already on the rig (for
the same or different rig sensor). Helps easily grow a map and
complete a rig.
* Added the option of keeping a subset of the camera poses fixed (for
when those have been validated in a smaller map).
* Images for any of the rig sensors (not just the reference one) can
be used in registration (but all must be for same sensor).
* Added the ``--save_pinhole_cameras`` option to save the optimized
cameras in ASP's Pinhole format (with no distortion for now).
* Absorb ``--rig_transforms_to_float`` into ``--camera_poses_to_float``.
* Save alongside an .nvm file a separate file having the values of
optical center point that are subtracted from each interest point
(for plotting in ``stereo_gui``).
* Merge the interest point tracks created as part of rig calibration
with the matches read from disk beforehand.
* Fix for too many valid interest point matches being filtered out.
voxblox_mesh (:numref:`voxblox_mesh`):
* Added median filtering of input point clouds (option
``--median_filter``).
* Added weighing of depth points given their distance from the
sensor (option ``--distance_weight``).
multi_stereo (:numref:`multi_stereo`):
* Left and right images can be acquired with different sensors.
* Use ``--left`` and ``--right`` to specify which stereo pairs to
run.
texrecon (:numref:`texrecon`):
* Can create a texture with images from multiple sensors.
point2dem (:numref:`point2dem`):
* Added the option ``--propagate-errors`` to grid the stddev values
computed during stereo triangulation.
* Added the option ``--input-is-projected`` to specify that the input
coordinates are already in the projected coordinate system.
stereo_gui (:numref:`stereo_gui`):
* Can read, write, edit, and overlay on top of images polygons in
plain text format in addition to the .shp format. Per-polygon
colors are supported.
* Can read nvm files whose features are shifted relative to the
optical center, if an ``.nvm`` file is accompanied by an
``_offsets.txt`` file.
* Added the option ``--preview`` to load one image at a time,
and quickly cycle through them with the 'n' and 'p' keys.
* Added the option ``--view-several-side-by-side``
to view several images side-by-side with a dialog to choose which
images to show (also accessible from the View menu).
* Added the option ``--font-size``, with the default of 9.
* Added the option ``--lowest-resolution-subimage-num-pixels`` to
control the behavior of the pyramid of subimages.
* Noticeable speedup in loading images.
* Bug fix in loading .nvm files (an SfM format).
image_align (:numref:`image_align`):
* Can find the 3D alignment around planet center that transforms the
second georeferenced image to the first one. This transform can be
used to apply the alignment to cameras and point clouds
(:numref:`image_align_ecef_trans`).
dem_mosaic (:numref:`dem_mosaic`):
* Added the option ``--fill-search-radius`` to fill nodata pixels in
a DEM with nearby valid values. This is different from
``--hole-fill-length``. See an example in :numref:`dem_mosaic_examples`.
wv_correct (:numref:`wv_correct`):
* Maxar (DigitalGlobe) WorldView-2 images with a processing (generation) date
(not acquisition date), of May 26, 2022 or newer have much-reduced CCD
artifacts, and for those this tool will in fact make the solution worse, not
better. This does not apply to WorldView-1, 3, or GeoEye-1.
* ASP builds after 2023-06-21 (so, version later than 3.2.0), will detect the
above-mentioned scenario, and will not apply any correction in that case (a
copy of the original image will be written instead and a warning will be
printed). This applies to both PAN and multi-spectral images.
corr_eval (:numref:`corr_eval`):
* Remove an excessive check. The refined/filtered disparity can be such
that left image pixel plus disparity may be outside the right image.
Don't fail in that case, but just skip the pixel, resulting in empty
correlation for that pixel.
cam_test (:numref:`cam_test`):
* Added the option ``--datum``. Useful for Pinhole cameras as those don't
know their datum.
* Added a warning if the camera center is below the datum.
misc:
* Upgraded to ISIS 8.0.0 and USGSCSM 1.7.
* Throw an error for WorldView products that are not Stereo1B or Basic1B.
That is because ASP does not support orthorectified Maxar products.
* Changed the "pretend" height of the RPC cameras from 10 km
above ground to around 100 km. RPC camera models do not store this
number and it does not make a difference normally, as only ray
directions to the ground matter. Yet, .adjust
files created with an earlier version of ASP for RPC cameras
should be re-created as those use the camera center.
* The latest version of the Xerces-C XML parser became 10 times
slower than before, which may affect the speed of
processing for XML-based camera models.
* Added back the tool ``view_reconstruction``, for examining
Theia's SfM solution (:numref:`sfm`).
* The ``theia_sfm`` tool can write the optical offsets for a given
nvm file which can be used in plotting such files in ``stereo_gui``.
* Added to ``hiedr2mosaic.py`` (:numref:`hiedr2mosaic`) the option
``--web`` to invoke ``spiceinit`` with ``web=True``. Contributed
by Giacomo Nodjoumi.
* Bugfix for reading .jp2 files. Needed to ship the JPEG2000 driver
and set GDAL_DRIVER_PATH.
* Fixed a failure in ``mapproject`` with a small DEM.
* Bugfix for exporting the TheiaSfM matches in ``camera_solve``.
* The documentation of the examples chapter was broken up into
individual pages (:numref:`examples`).
RELEASE 3.2.0, December 30, 2022
--------------------------------
DOI: `10.5281/zenodo.7497499 <https://doi.org/10.5281/zenodo.7497499>`_
Added functionality for creation of large-scale meshes and fused
textures for small planetary bodies and indoor environments. Added
logic for rig calibration. See individual tools below.
New tools:
* ``rig_calibrator``: Calibrates a rig of N image and/or
depth+image cameras. Can also co-register and refine
intrinsics of camera images acquired with N sensors with no rig
constraint (:numref:`rig_calibrator`).
* ``multi_stereo``: Runs multiple stereo pairs and produces
a fused mesh. Uses ``parallel_stereo``, ``pc_filter``, and
``voxblox_mesh`` (:numref:`multi_stereo`).
* ``voxblox_mesh``: Fuses point clouds into a seamless oriented
mesh, with each input point given a weight according to its
reliability. Based on the third-party VoxBlox software
(:numref:`voxblox_mesh`).
* ``texrecon``: Creates seamless textured meshes. Based on
the third-party MVS-Texturing project (:numref:`texrecon`).
* ``pc_filter``: Filters outliers in point clouds created with
pinhole cameras and weighs inliers based on many criteria
(:numref:`pc_filter`).
* Added CGAL-based tools for mesh smoothing, hole-filling, remeshing,
and removal of small connected components (:numref:`cgal_tools`).
* ``jitter_solve``: A tool for solving for jitter in CSM camera
models (:numref:`jitter_solve`). It gives promising results
for CTX, Pleiades, and DigitalGlobe data. Examples are provided.
Removed tools:
* ``datum_convert``: This was an attempt at having a tool applying
a transform between datums. It is suggested to use GDAL/PROJ instead.
Note that a datum transform may require fetching transformation grids,
and without them PROJ will quietly return incorrect results.
New sensors:
* Support the Pleiades exact sensor (for 1A/1B). See :numref:`pleiades`.
Implemented as a wrapper around the CSM linescan camera model.
parallel_stereo (:numref:`parallel_stereo`):
* Added the options ``--match-files-prefix`` and
``--clean-match-files-prefix`` for reusing interest point matches
from a previous ``bundle_adjust`` or ``parallel_stereo`` run. The
"clean" interest point matches created by ``bundle_adjust`` may
have fewer outliers than what stereo can create.
* Added the option ``--keep-only`` to convert all VRT files to TIF
(e.g., D.tif), then wipe all files and subdirectories except those
specified by given suffixes.
* Added the triangulation option ``--max-valid-triangulation-error``.
* The option ``--prev-run-prefix`` can be used to start a run
with bathymetry modeling at the triangulation stage while
reusing the previous stages of a run without such modeling
(the needed aligned bathy masks are created, if needed,
at the triangulation stage, if not done, as usual, at the
preprocessing stage).
* For SGM and MGM use by default 8 threads and number of processes
equal to number of cores divided by number of threads. Less likely
to run out of memory that way.
* Added examples of using PBS and SLURM with ASP
(:numref:`pbs_slurm`).
* Added an example of processing SkySat Stereo data
(:numref:`skysat_stereo`).
* Documented better the option ``--num-matches-from-disp-triplets``
for creating dense and uniformly distributed interest point
matches. Useful for modeling lens distortion.
parallel_bundle_adjust (:numref:`parallel_bundle_adjust`):
* Do not create subdirectories or symlinks, as that results in a
massive number of small files. (Unless ``--save-vwip`` is used,
see below.)
* Do not save by default .vwip files as those take space and are
only needed to find .match files. Use the new option
``--save-vwip`` to save them. Note that these depend on individual
image pairs, so ``parallel_bundle_adjust`` saves them in
subdirectories.
bundle_adjust (:numref:`bundle_adjust`):
* Save the convergence angle percentiles for each pair of
images having matches. Useful for understanding the configuration
of cameras.
* Added the option ``--tri-weight`` (default is 0) to keep triangulated
points close to their initial values. This looks more promising
than other weighing used so far at preventing the cameras from
moving when optimizing them. This assumes input cameras are
not grossly inaccurate. This adds a robust cost function
with the threshold given by ``--tri-robust-threshold``.
* Added the options ``--image-list``, ``--camera-list``,
``--mapprojected-data-list``, for when the inputs are too many to
specify on the command line.
* Added the option ``--fixed-image-list`` to specify a file having a
list of image names whose cameras should be fixed during
optimization.
* Pinhole cameras are no longer automatically reinitialized or
transformed based on GCP, but only refined given GCP. So, option
``--disable-pinhole-gcp-init`` is the default. Use one of the
options ``--init-camera-using-gcp`` (:numref:`camera_solve_gcp`),
``--transform-cameras-with-shared-gcp``,
``--transform-cameras-using-gcp`` (:numref:`sfm_world_coords`) for
manipulating cameras using GCP.
* Bugfix in initializing pinhole cameras based on GCP for off-nadir
cameras.
* When doing multiple passes (which is the default) at each pass
resume not only with clean matches but also with the cameras
optimized so far, rather than going to the originals.
* Can do multiple passes with ``--heights-from-dem``. One should
be generous with outlier removal thresholds if not sure of
the input DEM accuracy (option ``--remove-outliers-params``).
* Remove outliers based on spatial distribution of triangulated
points.
* Bugfix when the number of interest points is 4 million or more.
The algorithm would just stall. It is now replaced by an OpenMVG
algorithm.
* Fold ``--remove-outliers-by-disparity-params`` into
``--remove-outliers-params``.
* Bugfix in ``residuals_stats.txt``; the mean was correct but the
median was wrong.
* Let the default ``--heights-from-dem-weight`` be 1.0, and the
default ``--heights-from-dem-robust-threshold`` be 0.5. These
normally need tuning.
* Added the option ``--mapproj-dem``. If specified, evaluate
the disagreement of interest point matches after mapprojecting
onto this DEM, per interest point match pair, per matching image
pair, and per image. Useful at evaluating registration without
mapprojecting the images (:numref:`ba_out_files`).
* Added report files having the camera positions and orientations
before and after optimization (for Pinhole cameras only,
:numref:`ba_cam_pose`).
* Added options ``--proj-win`` and ``--proj-str`` for restricting
interest points to given area (useful when having many images
with footprints beyond area of interest).
* With ``--match-first-to-last``, write match files from earlier
to later images, rather than vice-versa. This was a bug, as
the matches were not being picked up correctly later.
* For pinhole cameras, can read .adjust files via
``--input-adjustments-prefix``, then apply them to existing .tsai
files via ``--inline-adjustments``. Until now one could do either
one or the other. Also works with ``--initial-transform``.
* Added a section describing how bundle adjustment is implemented
(:numref:`how_ba_works`).
point2dem (:numref:`point2dem`):
* Added the Tukey outlier removal method option applied to
triangulation errors (error_thresh = 75th_pct + 1.5 * (75th_pct -
25th_pct)). Also print out these percentages even for the regular
outlier removal.
bathymetry (:numref:`bathy_intro`):
* Added ``scale_bathy_mask.py``, for creating a PAN-sized image
or mask from a multispectral-sized image or mask, both for
WorldView data.
mapproject (:numref:`mapproject`):
* Exposed and documented the ``--query-projection`` option.
stereo_gui (:numref:`stereo_gui`):
* Can plot, overlay on top of images, and colorize scattered points
stored in a CSV file (:numref:`plot_csv`). Many colormap styles
are supported. See :numref:`colormap` for the list.
* Can show side-by-side colorized images with colorbars and coordinate
axes (:numref:`colorize`).
* Given a ``bundle_adjust`` output prefix, can select via checkboxes
any two images to show side-by-side, and automatically load their
match file or clean match file (options:
``--pairwise-matches`` and ``--pairwise-clean-matches``, also
accessible from the top menu).
* Visualize pairwise matches read from an nvm file, as created by
``rig_calibrator --save_nvm_no_shift``.
* Zoom to given proj win from the View menu. Useful for
reproducibility. Also accessible with the command-line option
``--zoom-proj-win``.
* Bug fix for slow overlaying of images with different datums.
* When all images have a georeference, start in georeference mode.
corr_eval (:numref:`corr_eval`):
* Bugfix for excessive memory usage with positive ``--prefilter-mode``.
* Added a note saying that the user should ensure that this tool uses
the same ``--corr-kernel`` and ``--prefilter-mode`` as
``parallel_stereo``.
* Added the option ``--sample-rate``.
cam_gen (:numref:`cam_gen`):
* Can read Planet's pinhole.json files. Then no further changes
are made to the produced pinhole camera.
* Fix a bug in output camera center determination, when an input
camera is provided.
* Bugfix in initializing pinhole cameras based on GCP for off-nadir
cameras given image corners and no prior camera.
* Added the options ``--cam-height`` and ``--cam-weight`` to try
to keep the camera at a given height above ground.
* Added the option ``--cam-ctr-weight``, to help fix the camera
center during refinement.
* If ``--optical-center`` is not set for pinhole cameras, use the
image center (half of image dimensions) times the pixel pitch.
The optical bar camera always uses the image center.
pc_align (:numref:`pc_align`):
* Fix a bug with loading very large DEMs; it was failing because of
a 32-bit integer overflow.
colormap (:numref:`colormap`):
* Added six colormaps: ``black-body``, ``viridis``, ``plasma``,
``kindlmann``, ``rainbow``, ``turbo``. Sources:
http://www.kennethmoreland.com/color-advice/ and matplotlib.
misc:
* Upgrade to C++-14, Python 3.9, latest libLAS, OpenCV 4, PCL 1.11,
Boost 1.72, ISIS 7.1.0, GDAL 3.5, and PROJ 9.1.0. The latter has a
whole new API, intended to handle properly transformations among
datums.
* The ``lronaccal`` tool in ISIS 7.1.0 appears buggy. Try using
an earlier ISIS version if this is needed.
* Replaced in some locations ASP's homegrown coordinate transformation
logic with what is in PROJ.
* Added the option of using the CSM camera with DigitalGlobe WorldView
images in bundle adjustment, stereo, and mapprojection (use with
``--t dg``). Option name is ``--dg-use-csm`` and must be set
consistently for all tools. This speeds up ground-to-image
computation by a factor of about 20 (which helps with
mapprojection and bundle adjustment). The result of projecting
into the camera changes by less than 0.015 pixels from before if
using this option. That is due to the fact that different
methods are used for position and orientation interpolation.
The ``cam_test`` option ``--dg-vs-csm`` can be
used for evaluating this discrepancy. Each of these methods is
consistent with itself to within 2e-8 when it comes to projecting
from camera to ground and back.
* Increased the cache size to 1 GB per process for each ASP tool.
Added the option ``--cache-size-mb``, to set this. Made the
warning message refer to this option when the limit is
hit. Documented this for all tools.
* Using ``-t pinhole`` now invokes the same logic as ``-t
nadirpinhole --no-datum``, which is same code path used by other
sessions. This wipes an old alternative approach. Eliminated much
other duplicated and mutated code for various sessions at the
preprocessing stage.
* Bugfix for D.tif VRTs as created by ``parallel_stereo``.
* Allow whitespaces in stereo.default before option names.
* Fix a crash in ISIS for international users by setting for all ASP
programs the environment variables LC_ALL and LANG to en_US.UTF-8.
* parallel_stereo will accept (but ignore) Unicode in stereo.default.
* Eliminate internal fudging of ``--ip-uniqueness-threshold``,
and make it equal to 0.8 for both ``stereo`` and
``bundle_adjust``. This was shown to increase the number of
interest points in situations when not enough were found.
* The ``historical_helper.py`` program expects a local installation
of ImageMagick and the ``convert`` tool (available on most systems
normally).
RELEASE 3.1.0, May 18, 2022
----------------------------
DOI: `10.5281/zenodo.6562267 <https://doi.org/10.5281/zenodo.6562267>`_
New camera additions:
* Added support for the USGSCSM Frame, SAR, and PushFrame sensors
(until now just the Linescan sensor was supported), together
with documentation and examples (for Dawn, MiniRF, and WAC,
respectively).
* Added support for ISIS SAR cameras, together with an example in
the doc.
* Added support for the PeruSat-1 linescan camera model (so far just
the RPC model was supported for this satellite).
New tool additions:
* Added the program ``corr_eval``, for evaluating the quality of
produced correlation with several metrics. See also the new option
``--save-left-right-disparity-difference`` in ``parallel_stereo``.
* Added the program ``otsu_threshold`` for computing an image
threshold. It can be used for separating land from water (in
WorldView multispectral NIR bands), and shadowed from lit areas
(in Lunar images).
* The program ``parallel_stereo`` can function as purely an image
correlation tool, without assuming any camera information, via
the option ``--correlator-mode``.
* Added the program ``image_align``. Used to align two images or
DEMs based on interest point matches or disparity, with given
alignment transform type (translation, rigid, similarity, affine,
and homography).
isis:
* Using ISIS 6.0.0.
csm:
* Save the camera state on multiple lines. On reading both the
single-line and multiple-line formats are accepted.
* Bundle adjustment, mapproject, and SfS with the CSM model can be
7-15 times faster than done with the corresponding ISIS mode
for linescan cameras (the latter as reimplemented in ASP itself).
It is strongly suggested to use CSM for large-scale processing.
* Bugfix in CSM linescan implementation for some LRO NAC sensors.
Also replaced the fixed-point method with the secant method in the
ground-to-image logic for CSM linescan cameras, which is faster.
parallel_stereo:
* Many fixes for reliability of stereo with local epipolar alignment.
* Added the option ``--resume-at-corr``. Start at the correlation stage
and skip recomputing the valid low-res and full-res disparities for
that stage.
* Bugfix: Eliminate edge artifacts in stereo refinement (for
subpixel modes 1, 2, and 3).
* Print in stereo_pprc the estimated convergence angle for the given
stereo pair (for alignment methods affineepipolar, local_epipolar, and
homography).
* Added the option ``--prev-run-prefix``, which makes parallel_stereo
start at the triangulation stage while using previous stages
from this other run. The new run can have different cameras, different
session (rpc vs dg, isis vs csm), different bundle
adjustment prefix, and different bathy planes (if applicable).
* Added option ``--save-left-right-disparity-difference`` to save the
discrepancy between left-to-right and right-to-left
disparities, which may help with filtering unreliable
disparities.
* Interest point matching with mapprojected images now happens
at full resolution, which results in a more reliable process
when there are clouds or if fine features are washed out at
low resolution.
* Expanded the doc to address a big gotcha: if left and right
mapprojected images have somewhat different resolutions, then an
immense disparity search range can result.
* Added the option ``--max-disp-spread`` to limit the spread of the
disparity to this value (useful with clouds in images).
* Added the option ``--ip-filter-using-dem`` to filter as outliers
interest point matches whose triangulated height differs by more
than given value from the height at the same location for the
given DEM.
* Added a doc section on handling of images with clouds.
* Disable by default velocity aberration and atmospheric refraction
corrections. These are not accurate enough and cause issues with
convergence of bundle adjustment. Can be enabled with
``--enable-correct-velocity-aberration`` and
``--enable-correct-atmospheric-refraction``. These improve results
however with Digital Globe cameras if not doing bundle
adjustment. (Note that these are still hard-coded as enabled for
optical bar camera models. This would require further study.)
* Added ready-made ASTER and LRO NAC examples with sample images,
cameras, commands, and outputs, all available for
download. Contributions of more examples are welcome. See
https://github.com/NeoGeographyToolkit/StereoPipelineSolvedExamples.
* Bugfix for ASTER cameras; this was fully broken.
* ASP's SGM and MGM algorithms will always use the cross-check for
disparity by default, to improve the quality, even if that takes
more time. It can be turned off with ``--xcorr-threshold -1``.
* Filter outliers in low-resolution disparity D_sub.tif. Can be
turned off by setting the percentage in ``--outlier-removal-params``
to 100.
* Filtering of interest points based on percentiles (using also
``--outlier-removal-params``).
* Folded ``--remove-outliers-by-disparity-params`` into
``--outlier-removal-params``.
* Bugfix in disparity search range handling when it is large.
* For Linux, in each tile's directory write the elapsed runtime and
memory usage to ``resource-usage.txt``.
* Removed the ``--local-homography`` option, as it is superseded by
``--alignment-method local_epipolar``, which blends the local results.
* The stereo tool is deprecated, and can be used only with the
ASP_BM classical block-matching algorithm when invoked without
local epipolar alignment. Use parallel_stereo instead.
* Added the experimental ``--gotcha-disparity-refinement`` option, under
NASA proposal 19-PDART19_2-0094.
bundle_adjust:
* Added the option ``--apply-initial-transform-only`` to apply an initial
transform to cameras while skipping image matching and other
steps, making the process much faster.
* Added the option ``--auto-overlap-params`` to automatically compute
which camera images overlap, if a DEM and camera footprint
expansion percentage are given.
* Added the option ``--max-pairwise-matches`` to put an upper limit on
the number of matches, as a large number can slow down bundle
adjustment. The default is 10000, likely a large overestimate (but
this includes potential outliers). If creating interest points
from disparity using ``--num-matches-from-disp-triplets``, similar
values should be used for both of these options.
* Stop printing warnings about failed triangulation if their number
goes over 100.
* Rename verbose ``final_residuals_no_loss_function_pointmap_point_log.csv``
to ``final_residuals_pointmap.csv`` and
``final_residuals_no_loss_function_raw_pixels.txt`` to
``final_residuals_raw_pixels.txt``, etc.
* Document the useful initial and final ``residuals_stats.txt`` files.
* Added new options for reusing a previous run:
``--match-files-prefix`` and ``--clean-match-files-prefix``.
sfs:
* SfS was made to work with any camera model supported by ASP,
including for Earth. For non-ISIS and non-CSM cameras, the option
``--sun-positions`` should be used.
* Exhaustively tested with the CSM model. It is very recommended to
use that one instead of ISIS .cub cameras, to get a very large
speedup and multithreading.
* Added a new ``--gradient-weight`` parameter, constraining the
first-order derivatives. Can be used in combination with the
``--smoothness-weight`` parameter which constrains the second-order
derivatives. The goal is to avoid a noisy solution without losing
detail.
* Much work on expanding the documentation.
mapproject:
* If the input image file has an embedded RPC camera model, append
it to the output mapprojected file. (Which makes stereo with
mapprojected images work correctly in this case.)
* Always start a process for each tile. The default tile size
is set to 5120 for non-ISIS cameras and to 1024 for ISIS. Use
a large value of ``--tile-size`` to use fewer processes.
bathymetry:
* Can have different water surfaces in left and right images, so the
triangulating rays bend at different heights.
* ``bathy_plane_calc`` can use a mask of pixels above water to find the
water-land interface, and also a set of actual ``lon, lat, height``
measurements.
* Added documentation for how to find water level heights at given
times and coordinates using National Ocean Service's tidal zoning
map.
pc_align:
* Add alignment method similarity-point-to-plane. It works better
than similarity-point-to-point at finding a scale between the
clouds when they have a large shift.
* Bugfix for alignment methods point-to-point and
similarity-point-to-point.
* Use RANSAC with ``--initial-transform-from-hillshading``, for increased
robustness to outliers. Replaced
``--initial-transform-outlier-removal-params`` (based on percentiles)
with ``--initial-transform-ransac-params``.
dem_mosaic:
* Add the option ``--tap``, to have the output grid be at integer
multiples of the grid size (like the default behavior of
``point2dem`` and ``mapproject``, and ``gdalwarp`` when invoked
with ``-tap``). If not set, the input grids determine
the output grid. (The produced DEM will then extend for an
additional 0.5 x grid_size beyond grid centers on its perimeter.)
* Do not allow more than one of these operations in a given
dem_mosaic invocation: fill holes, blur, or erode. These won't
work when also having more than one input DEM, reprojection is
desired, or priority blending length is used. This is done to
avoid confusion about order of operations, and the fact that
different input DEMs can have different grid sizes and hence the
input parameters have different effects on each.
* Bugfix for hole-filling and blurring. Tile artifacts got removed.
stereo_gui:
* Can cycle through given images from the View menu, or with the 'n'
and 'p' keys, when all images are in the same window.
* Can save a shapefile having points, segments, or polygons. (These
are distinct classes for a shapefile; the shapefile format
requires that these not be mixed in the same file.)
* Noticeable speedup when changing display mode (e.g., from
side-by-side to overlaid).
* Bugfix when overlaying shapefiles with different georeferences.
* Polygon layers can be set to desired colors from the left pane,
when overlaid.
* On startup, draw rectangular regions corresponding to values of
``--left-image-crop-win`` and ``--right-image-crop-win``, if these
are passed in as command line arguments together with two images.
* Quietly accept parallel_stereo options and pass them on if this tool
is invoked from the GUI.
image_calc:
* Add the option ``--no-georef`` to remove any georeference
information in the output image (useful with subsequent GDAL-based
processing).
* Added the option ``--longitude-offset`` to help to deal with the
fact that ASP-produced DEMs and orthoimages may have the
longitudes in [0, 360] while users may prefer [-180, 180].
* Bugfix: The ``--input-nodata`` value, if set, now overrides the
value set in the metadata (pixels having the previous no-data value
are then treated as valid).
Misc:
* Added the tool ``parse_match_file.py`` to convert a binary match file
to text and vice-versa.
* Add the tool ``cam_test`` to compare two different camera models
for the same image.
* Stereo and bundle adjustment with RPC cameras now query the RPC
model for the datum.
* The ``cam2rpc`` program saves its datum which is read when needed by
the RPC model loader.
* Add the option ``--triangulation-error-factor`` to ``point2las`` to allow
point cloud triangulation errors multiplied by this factor and
rounded/clamped appropriately to be stored in the 2-byte intensity
field in the LAS file.
* Make symlinks relative in ``parallel_bundle_adjust`` for portability.
* The mapprojected image saves as metadata the adjustments it was
created with.
* Save the low-resolution triangulated point cloud (``PC_sub.tif``) in
stereo_corr (based on filtered ``D_sub.tif``).
* The ``ipmatch`` program can take as input just images, with the
.vwip files looked up by extension.
* Bugfix in handling projections specified via an EPSG code.
* Bugfix when some environment variables or the path to ASP
itself have spaces. (It happens under Microsoft WSL.)
* Bugfix for the "too many open files" error for large images.
* Add the build date to the ``--version`` option in the ASP tools
and to the log files.
* Bugfix in the original author's MGM implementation, accepted by
the author.
RELEASE 3.0.0, July 27, 2021
----------------------------
DOI: `10.5281/zenodo.5140581 <https://doi.org/10.5281/zenodo.5140581>`_
New functionality:
* Added new stereo algorithms: MGM (original author implementation),
OpenCV SGBM, LIBELAS, MSMW, MSMW2, and OpenCV BM to complement
the existing ASP block matching, SGM, and MGM algorithms. See
https://stereopipeline.readthedocs.io/en/latest/next_steps.html
for usage. These will be further refined in subsequent releases.
* Added the ability to perform piecewise local epipolar alignment
for the input images, to be followed by a 1D disparity search (for
non-mapprojected images), as suggested by the Satellite Stereo
Pipeline (S2P) approach. This is still somewhat experimental.
* Added the ability for a user to plug into ASP any desired stereo
program working on image clips to which epipolar alignment has
been applied (as is customary in the computer vision community)
without rebuilding ASP.
* Added support for shallow-water bathymetry, so creation of terrain
models with correct depth determination for well-resolved areas under
shallow water. To be used with dg, rpc, and nadirpinhole cameras.
* Added two supporting tools for this: bathy_plane_calc and
bathy_threshold_calc.py.
* Added CCD artifact corrections for a few WV02 band 3 multispectral
images. Apart from the systematic artifacts corrected by this
logic, these images have a high-frequency unique pattern, and also
jitter, which are not corrected for. Also added tools and
documentation to easily tabulate more multispectral bands and TDI.
isis:
* Upgraded to ISIS 5.0.1.
* Ship a full Python 3.6 runtime, as expected by ISIS.
csm:
* Upgraded to USGSCSM 1.5.2 (ASP's own build of it has an additional
bugfix for LRO NAC not present in the conda-forge package).
* Validated the CSM model for CTX, HiRISE, and LRO NAC cameras.
* Added documentation for how to create CSM models from .cub
cameras.
* Export the state of a CSM camera after bundle adjustment and
pc_align (only for linescan cameras supported by ISIS).
parallel_stereo
* Will now throw an error if ``--threads`` is passed in, whose behavior
was not defined.
* Bugfix for Python 3.
bundle_adjust:
* Added the option ``--heights-from-dem-robust-threshold``.
* Added the option ``--save-intermediate-cameras`` to save the cameras
at each iteration.
* Added the option ``--match-first-to-last`` to match the first several
images to several last images by extending the logic of
``--overlap-limit`` past the last image to the earliest ones.
point2las
* Remove outliers by using a percentile times a factor, in a way
analogous to point2dem.
convert_pinhole_model:
* Improve the accuracy of the RPC approximation distortion and
undistortion.
sfs:
* Added the option ``--shadow-threshold`` to be able to specify
a single shadow threshold for all images. Also added
``--custom-shadow-threshold-list``.
* Added the option ``--robust-threshold`` for situations when the
measured image intensity is unreliable.
* Added the option ``--estimate-height-errors`` to estimate the
uncertainty in height at each computed SfS DEM pixel.
It can be customized via ``--height-error-params``.
* Added an auxiliary tool named sfs_blend to replace SfS
pixels with ones from the original LOLA DEM in permanently
shadowed regions.
stereo_gui:
* Added the ability to find the contour of a georeferenced image at
a given threshold. (It can be later edited, saved to disk, etc.)
* Bugfixes for polygon drawing logic.
* Much more responsive for overlaying many images.
image_calc:
* Support the sign function (can help in creating masks).
pc_align:
* Bugfix for ``--initial-transform-from-hillshading`` with outlier
removal.
* Add the ``--initial-transform-outlier-removal-params`` to control
outlier removal when finding matches between DEMs to align
using features detected in hillshaded images or selected
manually.
* Added ``--initial-rotation-angle``, to initialize the alignment
transform as the rotation with this angle (in degrees) around
the axis going from the planet center to the centroid of the point
cloud.
Misc
* Moved the daily build to the release area on GitHub, at
https://github.com/NeoGeographyToolkit/StereoPipeline/releases
* Upgraded to GDAL 2.4 and PROJ4 5.2.0. (ISIS constrains updating to
newer versions of these.)
* Added the option ``--ip-per-image`` to bundle adjustment and stereo, to
detect roughly how many interest points should be found per image
(only a small fraction of them may eventually match across images).
* The ``--min-triangulation-angle`` in stereo must be always positive if
set by the user. Can be set to something very small if desired.
This is a bug fix for this rarely used option (before, when set to
0 it would just reset itself to some internal non-small value).
* Bugfix for the VisionWorkbench implementation of the
Levenberg-Marquardt algorithm, it was giving up prematurely in
challenging situations.
* Bugfix for affine epipolar alignment. Use the OpenCV function
for finding the alignment matrix instead of the ASP one as OpenCV
can filter outliers which cause issues on rare occasions.
* Bugfix: Do not allow a full run to take place in a directory
where a clip was run, as that will produce incorrect results.
RELEASE 2.7.0, July 27, 2020
----------------------------
New functionality
* Support for ISIS version 4.1.10. Please set ISISDATA instead of
ISIS3DATA with this version of ISIS and ASP.
* Support for the Community Sensor Model
(https://github.com/USGS-Astrogeology/usgscsm)
* Ability to install ASP with conda. See INSTALLGUIDE.rst for details.
* Moved the documentation to ReStructured Text, and Sphinx-Doc. See
the documentation at: https://stereopipeline.readthedocs.io
* As of this release, we have transitioned to the
`Semantic Versioning 2.0.0 standard <https://semver.org/spec/v2.0.0.html>`_ for ASP.
bundle_adjust
* Can first create interest point matches among mapprojected images
(automatically or manually) and use those to create matches among
the unprojected images when the latter are so dissimilar in
perspective that the direct approach fails. See ``--mapprojected-data``.
stereo_gui
* Bug fix when zooming all images to same region when the region is
such that all images are seen fully.
sfs
* Added a new very challenging example at the South Pole with drastic
illumination changes and using a non-stereo DEM as initial guess.
* Fixed a bug with craters missing under low light.
* Fixed a bug with computation of exposures in terrain with many shadows.
* Print the Sun azimuth angle for all images (useful for sorting them
by illumination conditions).
hiedr2mosaic.py
* When hijitreg finds no match points between two CCDs, the program now
emits a warning message to STDOUT with a suggestion to perhaps
fiddle with hijitreg manually, and rather than fail with a
mysterious exception warning, now gracefully falls back to
assuming that there is no jitter correction between the two
CCDs that had no matches.
point2dem
* Use outlier filtering when computing the bounding box of a DEM.
The same option ``--remove-outliers-params`` controls this
just as for removing outliers by triangulation error.
mapproject
* Fixed a bug when finding the extent of the mapprojected
image when the DEM to project onto spans the whole planet.
point2mesh
* Only meshes in .obj format are created. This format can be opened
in Meshlab, Blender, or some other mesh viewer.
* The osgviewer program is no longer shipped.
* Fixed a bug with invalid points not being filtered.
* Fixed a bug with insufficient precision (now it can be set
by the user and defaults to 17 digits).
* Added the option ``--texture-step-size`` to control the sampling
rate for the texture, in addition to the -s option that controls
the sampling rate for the point cloud.
Misc
* Updated to C++ 11.
* Added phase subpixel correlation accuracy parameter.
RELEASE 2.6.2, June 15, 2019
----------------------------
DOI: https://doi.org/10.5281/zenodo.3247734
New satellites
* Added support for SkySat, together with a detailed example,
including how to jointly align and optimize cameras with respect
to a reference DEM, while optionally refining the intrinsics.
This approach may be helpful for other images obtained with frame
cameras and uncertain positioning information.
* Added support for CORONA KH-4B, KH-7, and KH-9 declassified images
and their panoramic (optical bar) camera models, as well as using
and optimizing camera models with RPC distortion (only RPC is
supported for KH-7 because it is a linescan camera). An example
is in the documentation.
New tools
* Added parallel_bundle_adjust which computes image statistics and
IP matching in a parallel manner similar to parallel_stereo.
* Added the cam_gen tool to create a correctly oriented pinhole
camera model given camera intrinsics, lon-lat coordinates of the
corners (or some other pixels), and optionally a ground truth
DEM. It can also parse SkySat's video/frame_index metafile to get
this data. It can also take as input any camera supported by ASP
via ``--input-camera`` and create a most-similar pinhole camera
model with given intrinsics.
* Added the coverage_fraction tool to provide a coverage estimate
of the results of a stereo call.
* Added the image_mosaic tool which merges together images based on
interest point matches. Can be used to stitch together Corona
scanned images.
* Added a new tool, n_align, to jointly align n clouds
(re-implemented from Matlab, works well for small clouds that are
close to each other).
stereo_rfne
* Added the option to run a non-SGM subpixel option after
running SGM/MGM.
* Added the phase correlation subpixel option. This is a Fourier
transform based method.
pc_align
* Added a new approach to finding an initial transform between
clouds, when they are DEMs, that may be more robust to large
scale or translation changes, or to noise. It is based on
hillshading the DEMs and finding interest point matches among
them, which are then used to find the transform. Can be invoked
with ``--initial-transform-from-hillshading``.
Supported transforms are: 'similarity' (rotation + translation +
scale), 'rigid' (rotation + translation) and 'translation'.
* Added the expression of the Euler angles in the North-East-Down
coordinate system around the center of gravity of the source
cloud.
* Bug fix: intersection of bounding boxes of the clouds takes
into account the initial transform applied to the source points.
* Added a new alignment algorithm, based on
https://github.com/IntelVCL/FastGlobalRegistration
It can be invoked with ``--alignment-method fgr``. It can perform
better than ICP when the clouds are close enough to each
other but there is a large number of outliers, when it can
function with very large ``--max-displacement``. It does worse if the
clouds need a big shift to align.
bundle_adjust
* Two passes of bundle adjustment (with outlier filtering after
the first pass) is now the default.
* The flag ``--skip-rough-homography`` is on by default as it usually
gives more reliable results. Use ``--enable-rough-homography``
to turn this option back on (useful when the footprint on the
ground and difference in perspective are large).
* The flag ``--disable-tri-ip-filter`` is also the default as input
cameras may not be reliable enough for this filter. Can be
enabled back with ``--enable-tri-ip-filter``.
* Added the ``--intrinsics-limits`` option to manually specify
intrinsic parameter limits.
* Added the ``--num-random-passes`` option to allow repeat solving
attempts with randomly distorted initial parameters.
* Added option to automatically guess overlapping images from
Worldview style XML camera files.
* Removed the non-Ceres bundle adjustment options.
* Added the option to share or not share selected intrinsic parameters
between pinhole cameras when optimizing intrinsics.
* Improvements in solving simultaneously for both intrinsics and
extrinsics of n camera images if underlying ground truth
terrain in the form of a DEM or LIDAR point cloud is
present. After this bundle adjustment, pairwise stereo and DEM
creation, the DEMs are well-aligned to the ground truth.
* Added the flag ``--reference-terrain-weight`` which, when increased,
helps align better camera images to a given reference terrain.
* Added the option ``--heights-from-dem``. It is very helpful in
determining an unknown focal length and distortion parameters
for pinhole cameras.
It can be used together with ``--heights-from-dem-weight``.
* Bug fix in outlier filtering for n images.
* Updated Ceres version from 1.11 to 1.14. When optimizing with
multiple threads, results now vary slightly from run to run.
Results from single threaded runs are deterministic.
* Added a new ``--parameter-tolerance`` option. Stop when the relative
error in the variables being optimized is less than this.
* Documented the ability to create a roughly positioned
pinhole camera model from an image if its intrinsics and the
longitude and latitude (and optionally height) of its corners
(or some other pixels) are known.
* When multiple passes happen with outliers removed, match files
are not over-written, but a new clean copy of them gets saved.
* Renamed ``--create-pinhole-cameras`` to ``--inline-adjustments``, and
distortion_params to other_intrinsics. This is needed since
for the panoramic model there will be other intrinsic
parameters as well.
* Added the option ``--forced-triangulation-distance`` for when one
really needs to triangulate with poor cameras. Can be used with
a very small but positive value of ``--min-triangulation-angle``.
* Added the option ``--transform-cameras-using-gcp``. If there
are at least two images with each having at least 3 GCP
(each GCP need not show in more than one image), use this
to convert cameras from an abstract coordinate system to world
coordinates.
* Increased the default ``--num-ransac-iterations`` to 1000 from 100
so that the solver tries harder to find a fit.
Increased default ``--ip-inlier-factor`` from 1/15 to 0.2 to help
with getting more interest points for steep terrain.
* Increased the default ``--ip-uniqueness-threshold`` from 0.7
to 0.8 to allow for more interest points.
* Option to filter interest points by elevation limit and lon-lat limit
after each pass of bundle adjustment except the last.
dem_mosaic
* Added normalized median absolute deviation (NMAD) output option.
* Added the option ``--force-projwin`` to create a mosaic filling
precisely the desired box specified via ``--t_projwin``.
stereo_gui
* Added the ability to manually reposition interest points.
* Can now show non-synchronous .match files (that is, each IP
need not be present in all images).
* Added basic functionality for drawing/editing/merging polygons on
top of georeferenced images or DEMs. The polygons can be saved as
shape files, and then used to cut out portions of images with GDAL.
* Added the option ``--nodata-value``. Pixels with value less than
or equal to this are shown as transparent.
* Added the ability to view .vwip files (specify one per image).
* Can view (but not edit) GCP files, via ``--gcp-file`` (creating
GCP is supported in a separate mode, per the doc).
* The option ``--dem-file`` specifies a DEM to use when creating
manually picked GCP and ``--gcp-file`` specifies the name of
the GCP file to use upon saving such GCP.
mapproject
* Added the ``--nearest-neighbor`` option to use that interpolation
method instead of bicubic. This is better for labeled images
which should not be interpolated.
convert_pinhole_model
* Can create RPC distortion models of any degree, which can be
further optimized in bundle_adjust. Old RPC distortion files are
still supported throughout ASP, but not functionality which
optimizes them. They can be approximately converted to new type
RPC distortion files with this tool if optimization is desired.
Misc
* Compiled against USGS ISIS version 3.6.0.
* Expanded the documentation explaining how to align cameras
to a DEM manually (or initialize such cameras) by selecting
matching points between the images and the DEM.
* The stereo tools and bundle_adjust will now cache image
statistics and interest points to files on disk.
* In stereo and bundle_adjust, when images or cameras are newer
than the match files, the latter get recomputed unless the tools
are invoked with ``--force-reuse-match-files``.
* Added a fix to make stereo work with the ZY3 satellite.
* For stereo and bundle_adjust, added the ``--no-datum`` option to
find interest points without assuming a reliable datum exists,
such as for irregularly shaped bodies. Added the related
option ``--skip-rough-homography`` to not use the datum in
rough homography computation. Added the option
``--ip-num-ransac-iterations`` for finer control of interest
point matching. Added ``--ip-triangulation-max-error`` to control
the triangulation error.
* The cam2rpc tool accepts ``--t_srs`` and ``--semi-major-axis`` as
alternatives to ``--datum`` and ``--dem-file``.
* Add option ``--theia-overrides`` to camera_solve to make it easier
to customize its behavior via flags.
* Added an explanation for how the pinhole model works.
RELEASE 2.6.1, August 13, 2018
------------------------------
New satellites
* Support Cartosat-1 and PeruSat-1 RPC cameras.
New tools
* Added convert_pinhole_model, to convert between various
existing such models.
* Added camera_footprint as a helpful utility to show where
images will project on to the ground.
* Documented and improved the ipfind and ipmatch tools.
ipfind is used to detect interest points in input images,
either to generate .vwip files for other tools or to
experiment with different IP finding settings.
ipmatch matches the IPs contained in .vwip files to
create .match files.
New camera models
* Added simple atmospheric refraction correction to the
DG and SPOT5 camera models. This can be enabled
using the "--enable-correct-atmospheric-refraction" option.
* Added support for pinhole camera models where the lens
distortion is given by an RPC model (rational polynomial
coefficients), of degrees 4, 5, and 6. Such a model may be more
expressive than existing ones, and its coefficients can now be
optimized using bundle adjustment. An initial model can be
created with convert_pinhole_model.
stereo_corr
* Added new options for post-SGM subpixel stereo. Previously only a
parabola method was used.
* Added option to perform cross-correlation checks on multiple
resolution levels while using SGM/MGM.
* Added option ``--corr-search-limit`` to constrain the automatically
computed correlation search range.
* Added ``--corr-memory-limit-mb`` option to limit the memory usage of
the SGM/MGM algorithms.
* Improved search range estimation in nadir epipolar alignment
cases. Added ``--elevation-limit`` option to help constrain this
search range.
* Added hybrid SGM/MGM stereo option.
* Improvements to SGM search range estimation.
* Added ``--min-num-ip`` option.
bundle_adjust
* Added the ability to optimize pinhole camera intrinsic
parameters, with and without having a LIDAR or DEM ground truth
to be used as reference (the latter is recommended though).
* The tool is a lot more sensitive now to ``--camera-weight``,
existing results may change a lot.
* Added the parameters ``--rotation-weight`` and ``--translation-weight``
to penalize large rotation and translation changes.
* Added the option ``--fixed-camera-indices`` to keep some cameras
fixed while optimizing others.
* Can read the adjustments from a previous invocation of this
program via ``--input-adjustments-prefix``.
* Can read each of pc_align's output transforms and apply it
to the input cameras via ``--initial-transform``, to be able to
bring the cameras in the same coordinate system as the aligned
terrain (the initial transform can have a rotation, translation,
and scale). If ``--input-adjustments-prefix`` is specified as well,
the input adjustments are read first, and the pc_align
transform is applied on top.
* Renamed ``--local-pinhole`` to ``--create-pinhole-cameras``.
* Added the parameter ``--nodata-value`` to ignore pixels at and below
a threshold.
  * Added the ability to transfer interest points manually picked in
    mapprojected images to the original unprojected images via
    ``--mapprojected-data``.
* Added the flag ``--use-lon-lat-height-gcp-error``. Then, if using
GCP, the three standard deviations are interpreted as applying
not to x, y, z but to latitude, longitude, and height above
datum (in this order). Hence, if the latitude and longitude are
known accurately, while the height less so, the third standard
deviation can be set to something much larger.
* Added the ability to do multiple passes of bundle adjustment,
removing outliers at each pass based on reprojection error and
disparity (difference of pixel value between images). This
works for any number of cameras. Match files are updated with
outliers removed. Controlled via ``--num-passes``,
``--remove-outliers-params`` and ``--remove-outliers-by-disparity-params``.
* Added the option ``--save-cnet-as-csv``, to save the control
network containing all interest points in the format used by
ground control points, so it can be inspected.
* If ``--datum`` is specified, bundle_adjust will save to disk
the reprojection errors before and after optimization.
stereo_gui
* Can view SPOT5 .BIL files.
pc_align
* Add the ability to help the tool with an initial translation
specified as a North-East-Down vector, to be used to correct known
gross offsets before proceeding with alignment. The option is
``--initial-ned-translation``.
* When pc_align is initialized via ``--initial-transform`` or
``--initial-ned-translation``, the translation vector is now computed
starting from the source points before any of these initial
transforms are applied, rather than after. The end point of this
vector is still the source points after alignment to the
reference. This is consistent with the alignment transform output
by the tool, which also is from the source points before any
initial alignment and to the reference points.
* The translation vector was expressed incorrectly in the
North-East-Down coordinate system, that is now fixed.
dem_mosaic
  * If the -o option value is specified as filename.tif, the entire mosaic
    will be written to this exact file, rather than creating tiles.
point2dem
* Added the ability to apply a filter to the cloud points in each circular
neighborhood before gridding. In addition to the current weighted average
option, it supports min, max, mean, median, stddev, count, nmad,
and percentile filters. The ``--search-radius-factor`` parameter can
control the neighborhood size.
* Sped up hole-filling in ortho image generation. If this creates
more holes than before, it is suggested to relax all outlier filtering,
including via ``--remove-outliers-params``, median filtering, and erosion.
* Added the option ``--orthoimage-hole-fill-extra-len`` to make hole-filling
more aggressive by first extrapolating the cloud.
datum_convert
* Rewrote the tool to depend on the Proj.4 HTDPGrids grid shift system.
This fixed some situations where the tool was not working such as WGS84/NAD83
conversions and also added support for datum realizations (versions).
* Vertical datum conversion is only supported in simple cases like D_MARS to MOLA.
* Even with HTDPGrids, datum support with the Proj.4 library is poor and will
hopefully be improved with future releases. Until then try to get external
verification of results obtained with the datum_convert tool.
wv_correct
* Supports WV2 TDI = 32 in reverse scan direction.
Misc
* We now compile against USGS ISIS version 3.5.2.
* The tools mapproject, dem_mosaic, dg_mosaic, and wv_correct support
the ``--ot`` option, to round the output pixels to several types of
integer, reducing storage, but perhaps decreasing accuracy.
* The tools mapproject and image_calc support the ``--mo`` option to
add metadata to the geoheader in the format 'VAR1=VAL1 VAR2=VAL2',
etc.
* Handle properly in bundle_adjust, orbitviz, and stereo
with mapprojected images the case when, for RPC cameras,
these coefficients are stored in _RPC.TXT files.
* Support for web-based PROJ.4 strings, e.g.,
point2dem ``--t_srs`` http://spatialreference.org/ref/iau2000/49900/
* Added ``--max-output-size`` option to point2dem to prevent against
creation of too large DEMs.
* Added image download option in hiedr2mosaic.py.
* Bug fix in cam2map4stereo.py when the longitude crosses 180 degrees.
* Added support for running sparse_disp with your own Python installation.
* Bug fix for image cropping with epipolar aligned images.
* The sfs tool supports the integrability constraint weight from Horn 1990.
* The software works with both Python versions >= 2.6 and 3.
RELEASE 2.6.0, May 15, 2017
---------------------------
New stereo algorithms
* ASP now supports the Semi Global Matching (SGM) and
More Global Matching (MGM) stereo algorithms.
They do particularly well for Earth imagery, better
than the present approaches. They can be invoked with
``--stereo-algorithm`` 1 and 2 respectively.
New tools
* Added cam2rpc, a tool to create an RPC model from any
ASP-supported camera. Such cameras can be used with ASP for
Earth and planetary data (stereo's ``--datum`` option must be set),
or passed to third-party stereo tools S2P and SETSM.
* Added correct_icebridge_l3_dem for IceBridge.
* Added fetch_icebridge_data for IceBridge.
parallel_stereo
  * By default, use as many processes as there are cores, and one
    thread per process.
stereo_pprc
* Large speedup in epipolar alignment.
* Improved epipolar alignment quality with standard pinhole cameras.
* Added the options ``--ip-inlier-threshold`` and ``--ip-uniqueness-threshold``
for finer-grained control over interest point generation.
  * Fix a bug with interest point matching when the camera model is RPC
    and the RPC approximation domain does not intersect the datum.
stereo_corr
  * Added new option ``--stereo-algorithm``. Choices 1 and 2 replace
    the standard integer correlator with a new semi-global matching
    (SGM) correlator or an MGM correlator respectively. SGM/MGM is
slow and memory intensive but it can produce better results
for some challenging input images, especially for IceBridge.
See the manual for more details.
stereo_tri
* Added the option ``--min-triangulation-angle`` to not triangulate
when rays have an angle less than this.
stereo_gui
* Zooming in one image can trigger all other side-by-side images to
zoom to same region.
* Clicking on a pixel prints image pixel indices, value, and image
name. Selecting a region with Control+Mouse prints its bounds in
pixels, and, if georeferenced, in projected and degree units.
* Added a 1D profile tool for DEMs.
* Can visualize the pixel locations for a GCP file (by interpreting
them as interest points).
* Can save a screenshot of the current view.
* If all images are in the same window, can show a given image above
or below all others. Also can zoom to bring any image in full view
(from the list of images on the left).
* Options to set the azimuth and elevation when showing hillshaded
images.
dem_mosaic
* Added the option ``--dem-blur-sigma`` to blur the output DEM.
* Use by default ``--weights-exponent 2`` to improve the blending,
and increase this to 3 if ``--priority-blending-length`` is specified.
* Added the options ``--tile-list``, ``--block-max``, and ``--nodata-threshold``.
* Display the number of valid pixels written.
* Do not write empty tiles.
geodiff
* One of the two input files can be in CSV format.
dg_mosaic
* Save on output the mean values for MEANSUNEL, MEANSUNAZ,
and a few more.
point2dem
* Added the parameter ``--gaussian-sigma-factor`` to control the
Gaussian kernel width when creating a DEM (to be used together
with ``--search-radius-factor``).
sfs
* Improvements, speedups, bug fixes, more documentation, usage
recipes, much decreased memory usage, together with a lot of
testing and validation for the Moon.
* Can run on multiple input DEM clips (which can be chosen as
representative for the desired large input DEM region and images)
to solve for adjusted camera positions throughout this region.
* Added parallel_sfs, to run sfs as multiple processes over
multiple machines.
bundle_adjust
* Can optimize the intrinsic parameters for pinhole cameras. The
focal length, optical center, and distortion parameters can
be fixed or varied independently of each other. To be used with
``--local-pinhole``, ``--solve-intrinsics``, ``--intrinsics-to-float``.
* Added the option ``--overlap-list``. It can be used to specify which
image pairs are expected to overlap and hence to be used to
compute matches.
* Added the option ``--initial-transform`` to initialize the adjustments
based on a 4x4 rotation + translation transform, such as coming
from pc_align.
* Added the options ``--ip-inlier-threshold`` and ``--ip-uniqueness-threshold``
for finer-grained control over interest point generation.
pc_align
* Can solve for a rotation + translation or for rotation +
translation + scale using least squares instead of ICP, if the
first cloud is a DEM. It is suggested that the input clouds be
very close or otherwise the ``--initial-transform`` option be used,
for the method to converge. The option is:
``--alignment-method`` [ least-squares | similarity-least-squares ]
Misc
* Built with ISIS 3.5.0.
* Minimum supported OS versions are OSX 10.11, RHEL 6, SUSE 12, and
Ubuntu 14.
* Ship with GDAL's gdalwarp and gdaldem.
* Added integration with Zenodo so that this and all future ASP
releases will have a DOI. More info in the asp_book.pdf
RELEASE 2.5.3, August 24, 2016
------------------------------
Highlights:
- Added the ability to process ASTER L1A VNIR images via the tool
aster2asp that creates image files and both RPC and rigorous
linescan camera models that can then be passed to stereo.
The RPC model seems to work just as well as the rigorous one
and is much faster.
- Added the ability to process SPOT5 images with stereo,
bundle_adjust, and mapproject using a rigorous linescan camera model.
- Added the add_spot_rpc tool to create RPC models for SPOT5
which allows them to be mapprojected with the RPC model.
pc_align
* Can solve for a scale change in addition to a rotation and
translation to best align two clouds, hence for a similarity
transform, using option: ``--alignment-method similarity-point-to-point``.
mapproject
* Added ability to mapproject color images.
* Added option to mapproject on to a flat datum.
camera_solve
* Added option to accept multiple input camera models.
Other:
dem_mosaic
* Fix a bug with mosaicking of DEMs over very large extent.
* Fix a bug with 360 degree longitude offset.
* Added the option ``--use-centerline-weights``. It will compute
blending weights based on a DEM centerline algorithm. Produces
smoother weights if the input DEMs don't have holes or complicated
boundary.
colormap
* Added a new colormap scheme, 'cubehelix', that works better for
most color-blind people.
stereo_gui
* Use transparent pixels for displaying no-data values instead of black.
* Can delete or hillshade individual images when overlaid.
* Add control to hide/show all images when in overlay mode.
Misc
* Make ASP handle gracefully georeferenced images with some pixels
having projected coordinates outside of the range expected by PROJ.4.
* Removed the deprecated orthoproject tool. Now mapproject should be used.
* Fixed a bug in ``pc_align`` which caused the ``--max-displacement``
argument to be misread in some situations.
* Removed some extraneous code slowing down the datum_convert tool.
* Fixed a bug in point2dem handling the Albers Conic Equal Area projection.
* Added standard thread/bigtiff/LZW options to image_calc.
RELEASE 2.5.2, Feb 29, 2016
---------------------------
Highlights:
Added a constellation of features and tools to support solving for
the positions of input images lacking position information. Can be used
for aerial imagery with inaccurate or incomplete pose information,
images from low cost drones, historical images lacking metadata,
and images taken with handheld cameras.
camera_solve
* New tool which adds support for aerial imagery etc as described above.
* Uses the THEIA library (http://www.theia-sfm.org/index.html)
to compute camera positions and orientations where no metadata is available.
* Ground control points and estimated camera positions
can be used to find absolute camera positions.
* Added section to documentation describing ways to use ASP to
process imagery from NASA's IceBridge program.
camera_calibrate
* A convenience camera calibration tool that is a wrapper around
the OpenCV checkerboard calibration program with outputs in
formats for camera_solve and ASP.
bundle_adjust
* Added several options to support solving for pinhole camera
models in local coordinates using GCPs or estimated camera positions.
* Improved filtering options for which images are IP-matched.
orbitviz
* Significantly improved the accuracy of the plotted camera locations.
* Added option to load results from camera_solve.
wv_correct
* Now corrects TDI 8 (Reverse) of WV01 and TDI 8 (Forward
and Reverse) and TDI 32 (Forward) of WV02. Other correction
behavior is unchanged.
stereo_corr
* Added the ability to filter large disparities from D_sub that
can greatly slow down a run. The options are ``--rm-quantile-percentile``
and ``--rm-quantile-multiple``.
undistort_image
* A new tool to test out pinhole model lens distortion parameters.
Lens distortion models:
* Switched from binary .pinhole file format to updated version of
the old plain text .tsai file format.
* Added support for Photometrix camera calibration parameters.
* New appendix to the documentation describing the .tsai file format
and supported lens distortion models.
Other:
Tools
* Suppressed pesky aux.xml warning sometimes printed by GDAL.
* Removed the long-deprecated orthoproject tool.
* Added icebridge_kmz_to_csv and lvis2kml utilities.
point2las
* Write correct bounding box in the header.
* Respect projections that are not lon-lat.
point2dem
* Increased speed of erode option.
docs
* Mention DERT, a tool for exploring large DEMs.
* Added new section describing camera_solve tool in detail.
RELEASE 2.5.1, November 13, 2015
--------------------------------
Highlights:
stereo
* Added jitter correction for Digital Globe linescan imagery.
* Bug fix for stereo with map-projected images using the RPC
session (e.g, for map-projected Pleiades imagery).
* Added OpenCV-based SIFT and ORB interest point finding options.
bundle_adjust
* Much improved convergence for Digital Globe cameras.
* Added OpenCV-based SIFT and ORB interest point finding options.
point2dem, point2las, and pc_align
  * The datum (``-r`` or ``--semi-major-axis``) is optional now.
The planet will be inferred automatically (together with the
projection) from the input images if present. This can be useful
for bodies that are not Moon, Mars, or Earth. The datum and
projection can still be overridden with ``--reference-spheroid`` (or
``--datum``) and ``--t_srs``.
dem_mosaic
* Introduce ``--priority-blending-length``, measured in input pixels.
If positive, keep unmodified values from the earliest available
DEM at the current location except a band this wide near its
boundary where blending will happen. Meant to be used with
smaller high-resolution "foreground" DEMs and larger
lower-resolution "background" DEMs that should be specified later
in the list. Changing ``--weights-exponent`` can improve transition.
pc_align
* Added the ability to compute a manual rotation + translation +
scale transform based on user-selected point correspondences
from reference to source cloud in stereo_gui.
stereo_gui
* Added the ability to generate ground control point (GCP) files
for bundle_adjust by picking features. In addition to the images
to be bundle-adjusted, one should provide a georeferenced image to find
the GCP lon-lat, and a reference DEM to find the GCP heights.
Other:
stereo
* If the input images are map-projected (georeferenced) and
alignment method is none, all image outputs of stereo are
georeferenced as well, such as GoodPixelMap, D_sub, disparity,
etc. As such, all these data can be overlaid in stereo_gui.
* The output point cloud saves datum info from input images
(even when the inputs are not georeferenced).
* Increased reliability of interest point detection.
* Decreased the default timeout to 900 seconds. This still needs
tuning and a permanent solution is necessary.
point2dem, point2las, and pc_align
* Accept ``--datum`` (``-r``) ``MOLA``, as a shortcut for the sphere with
radius 3,396,000 meters.
dem_mosaic
* Fix an issue with minor jumps across tiles.
* Introduce ``--save-dem-weight`` . Saves the weight image that
tracks how much the input DEM with given index contributed to the
output mosaic at each pixel (smallest index is 0).
* Introduce ``--save-index-map``. For each output pixel, save the
index of the input DEM it came from (applicable only for
``--first``, ``--last``, ``--min``, and ``--max``). A text file with the index
assigned to each input DEM is saved as well.
* Rename ``--blending-length`` to ``--extra-crop-length``, for clarity.
dg_mosaic
* Added the switch ``--fix-seams`` to use interest point matching
to fix seams in the output mosaic due to inconsistencies between
image and camera data. Such artifacts may show up in older
(2009 or earlier) Digital Globe images.
stereo_gui
* Added the option ``--match-file`` to view interest point matches.
* Added the options ``--delete-temporary-files-on-exit`` and
``--create-image-pyramids-only``.
* Can read the georeference of map-projected ISIS cubes.
point2dem
* Respect ``--t_projwin`` to the letter.
* Can create simultaneously DEMs at multiple resolutions (by
passing multiple values in quotes to ``--dem-spacing``).
* Fix minor discrepancies in the minor semi-axis for the WGS84,
NAD83 and WGS72 datums. Now using GDAL/OGR for that.
point2las
* Save the LAS file with a datum if the input PC had one.
image_calc
* Fix calculation bug when no-data is present.
pc_align
* Upgraded to the latest libpointmatcher. This may result in minor
alignment changes as the core algorithm got modified.
* Save all PC clouds with datum and projection info, if present. Add
comment lines with the datum and projection to CSV files.
geodiff
* Bug fix when the two DEMs have longitudes offset by 360 degrees.
colormap
* Default style is binary-red-blue. Works better than jet when
data goes out of range.
pc_merge
* Can merge clouds with 1 band. That is, can merge not only PC.tif
files but also L.tif files, with the goal of using these two
merged datasets to create a merged orthoimage with point2dem.
point2mesh
* Can create a mesh from a DEM and an orthoimage (DRG file).
RELEASE 2.5.0, August 31, 2015
------------------------------
Improved speed, coverage, and accuracy for areas with steep slopes
for ISIS, RPC and Pinhole cameras by implementing stereo using
images map-projected onto an existing DEM. This mapprojection is
multi-process and hence much faster than cam2map. This
functionality was previously available only for Digital Globe
images.
New tools:
* Added stereo_gui, an image viewer and GUI front-end.
Features:
- View extremely large images using a pyramid approach.
- If invoked with the same interface as stereo, can run stereo on
selected clips.
- Load images with int, float, and RGB pixels, including ISIS
cubes, DEMs, NTF, TIF, and other formats.
- Can overlay georeferenced images and can toggle individual
images on and off (like Google Earth).
- Show images side-by-side, as tiles on grid, or on top of each other.
- Create and view hillshaded DEMs.
- View/add/delete interest points.
- Create shadow thresholds by clicking on shadow pixels (needed
for sfs).
- Based on Michael Broxton's vwv tool.
* Added sfs, a tool to refine DEMs using shape-from-shading. Can
optimize the DEM, albedo per pixel, image exposures and camera
positions and orientations using a multi-resolution pyramid
approach. Can handle shadows. Tested with LRO NAC lunar images at
low latitudes and toward poles. It works only with ISIS images.
* Added image_calc, a tool for performing simple per-pixel arithmetic
operations on one or more images.
* Added pc_merge, a tool for concatenating ASP-produced point clouds.
* Added pansharp, a tool to apply a pansharp algorithm to a matched
grayscale image and a low resolution color image.
* Added datum_convert, a tool to transform a DEM to a different
datum (e.g., NAD27 to WGS84).
* Added geodiff, a tool for taking the (absolute) difference of two
DEMs.
* Documented the colormap tool. Added a new colormap option based
on the paper "Diverging Color Maps for Scientific Visualization"
(http://www.sandia.gov/~kmorel/documents/ColorMaps/).
* Added gdalinfo, gdal_translate, and gdalbuildvrt to the bin
directory. These executables are compiled with JPEG2000 and
BigTIFF support, and can handle NTF images.
docs
* Added a documentation section on 'tips and tricks', summarizing
in one place practices for getting the most out of ASP.
stereo
* Increase the default correlation timeout to 1800 seconds.
* Fix failure in interest point matching in certain circumstances.
* Use bundle-adjusted models (if provided) at all stages of stereo,
not just at triangulation.
* Added ``--right-image-crop-win`` in addition to ``--left-image-crop-win``.
If both are specified, stereo crops both images to desired regions
before running stereo (this is different from when only
``--left-image-crop-win`` is specified, as then no actual cropping
happens, the domain of computation is just restricted to the desired
area).
* Bug fix, remove outliers during search range determination.
* Added the option ``--ip-per-tile``, to search for more interest points
if the default is insufficient.
* If the input images are georeferenced, the good pixel map will be
written with a georeference.
point2dem
* Fixed a slight discrepancy in the value of the semi-minor axis in
the WGS84 and NAD83 datum implementations.
* Added the option ``--median-filter-params`` to
remove spikes using a median filter.
* Added the option ``--erode-length`` to erode pixels from point cloud
boundary (after outliers are removed, but before filling in holes).
* Improved hole-filling, and removed the ``--hole-fill-mode`` and
``--hole-fill-num-smooth-iter``, as there's only one algorithm now.
* Improved performance when large holes are to be filled.
* Can create a DEM from point clouds stored in CSV files containing
easting, northing, and height above datum (the PROJ.4 string
needed to interpret these numbers should be set with ``--csv-proj4``).
* Fixed a bug in creating DEMs from CSV files when different projections
are used on input and output.
* Expose to user gnomonic and oblique stereographic projections,
as well as false easting and false northing (where applicable).
This is a shortcut from using explicitly ``--t_srs`` for the PROJ.4 string.
* The default no-data value is set to the smallest float.
pc_align
* Can ingest CSV files containing easting, northing, and height
above datum (the PROJ.4 string needed to interpret these numbers
should be set with ``--csv-proj4``).
* If the reference point cloud is a DEM, the initial and final errors
in the statistics, as well as gross outlier removal, are done using
a new distance function. Instead of finding the distance from a 3D
point to the closest point in the cloud, the 3D point is projected
onto DEM's datum, its longitude and latitude are found, the
   height in the DEM is interpolated, and the obtained point on the
DEM is declared to be the closest point. This is more accurate
than the original implementation for coarse DEMs. The old
approach is available using the ``--no-dem-distances`` flag.
* Fix a bug with a 360 degree longitude offset.
point2las
* Added the ability to specify a custom projection (PROJ.4 string)
for output LAS files.
dem_mosaic
* Write GeoTIFF files with blocks of size 256 x 256 as those
may be faster to process with GDAL tools.
* Bug fix when the tool is used to re-project.
* Added the option ``--weights-blur-sigma`` to allow the blending
weights to be blurred by a Gaussian to increase their smoothness.
* Added the option ``--weight-exponent`` , to allow weights
to increase faster than linearly.
* Added ``--stddev`` option to compute standard deviation.
* Added the ability to fill holes in the output mosaic.
bundle_adjust
* Added new parameters, ``--ip-per-tile`` and ``--min-triangulation-angle``.
* Bug fix in handling situations when a point cannot get projected
into the camera.
* Bug fix in the camera adjustment logic. Any .adjust files may
need to be regenerated.
image2qtree
* Bug fixes.
cam2map4stereo.py
* Create temporary files in current directory, to avoid access
issues to system directories.
mapproject
* Can run on multiple machines.
* Use multiple processes for ISIS images, for a huge speedup.
* Bug fix, the mapprojected image should not go much beyond the DEM
it is mapprojected onto (where it would have no valid pixels).
dg_mosaic
* Default penalty weight produces a more accurate fit when creating an
RPC model from a DG model.
* Handle the situation when two images to be mosaicked start at the
same output row number.
* Added ``--target-resolution`` option to specify the output resolution in meters.
Misc.
* Upgraded to ISIS 3.4.10.
* Oldest supported OSX version is 10.8.
* Added documentation for image2qtree and hillshade.
RELEASE 2.4.2, October 6, 2014
------------------------------
ASP can perform multi-view triangulation (using both the
stereo and parallel_stereo tools). The first image is set
as reference, disparities are computed from it to the other
ones, and joint triangulation is performed.
Added a new tool, dem_mosaic, for mosaicking a large number of
DEMs, with erosion at boundary, smooth blending, and tiled output.
Instead of blending, the tool can do the first, last, min, max,
mean, median, or count of encountered DEM values.
dg_mosaic
* Support for multi-band (multi-spectral) images. Use ``--band``
to pick a band to mosaic.
stereo
* Bug fix in interest point matching in certain circumstances.
* Set the correlation timeout to 600 seconds. This is generous
and ensures runs don't stall.
point2dem
* Take as input n clouds and optionally n texture files, create a
single DEM/orthoimage.
* Take as input LAS and CSV files in addition to ASP's PC format.
* Fix a bug in the interplay of hole-filling and outlier removal
for orthoimage creation.
* Ensure that the DEM grid is always at integer multiples of the
grid size. This way, two DEMs with overlapping grids of the same
size will be exactly on top of each other, minimizing interpolation
error in subsequent mosaicking.
* Outlier removal is on by default. Can be disabled by setting
the percentage in ``--remove-outliers-params`` to 100.
bundle_adjust
* Use multiple-threads for non-ISIS sessions.
* Added the parameter ``--overlap-limit`` to limit the number
of subsequent images to search for matches to the current image.
* Added the parameter ``--camera-weight`` , to set the weight to
give to the constraint that the camera positions/orientations
stay close to the original values (only for the Ceres solver).
dem_geoid
* Support the EGM2008 geoid. The geoid surface across all Earth
is computed with an error of less than 1.5 cm compared to the
values generated by harmonic synthesis. A 2.5 x 2.5 minute grid
is used.
* Converted the EGM geoids shipped with ASP to INT16 and JPEG2000,
resulting in size reduction of more than 10x.
wv_correct
* Corrects TDI of 16, 48, 56, and 64 (forward and reverse scan
directions) for WV01, TDI of 8 (forward only) for WV01, and TDI
of 16, 48, 64 (forward and reverse scan directions) for
WV02. Returns uncorrected images in other cases.
pc_align
* Fix a crash for very large clouds.
* Use a progress bar when loading data.
* Support LAS files on input and output.
point2las
* Bug fix when saving LAS files in respect to a datum.
Documentation
* Move the non-ISIS-specific tutorial sections onto its own
chapter, to be read by both ISIS and Earth users. Updates and
cleanup.
RELEASE 2.4.1, 12 July, 2014
----------------------------
Added a new tool, bundle_adjust, which uses Google's ceres-solver
to solve for adjusted camera positions and orientations. Works
for n images and cameras, for all camera types supported by ASP.
wv_correct
* Improved corrections for WV01 images of TDI 16.
stereo_rfne
* Performance bugfix when the integer disparity is noisy.
stereo_fltr
* Fix for large memory usage when removing small islands from
disparity with ``--erode-max-size``.
stereo_tri
* Bug fixes for MER cameras.
stereo_tri and mapproject
* Added the option ``--bundle-adjust-prefix`` to read adjusted
camera models obtained by previously running bundle_adjust with
this output prefix.
point2las
* LAS files can be saved in geo-referenced format in respect
to a specified datum (option ``--reference-spheroid``).
point2dem
* Bug fix, longitude could be off by 360 degrees.
* Robustness to large jumps in point cloud values.
pc_align
* Ability to read and write CSV files having UTM data (easting,
northing, height above datum).
* Read DEMs in the ISIS cube format.
RELEASE 2.4.0, 28 April, 2014
-----------------------------
Added wv_correct, a tool for correcting artifacts in Digital Globe
WorldView-1 and WorldView-2 images with TDI of 16.
Added logging to a file for stereo, pc_align, point2dem,
point2mesh, point2las, and dem_geoid.
Added a tutorial for processing Digital Globe Earth imagery
and expanded the MOC tutorial.
Bug fixes in mosaicking of Digital Globe images.
parallel_stereo
* Use dynamic load balancing for improved performance.
* Automatically determine the optimal number of processes
and threads for each stage of stereo.
stereo_pprc
* Added the ``--skip-image-normalization`` option (for non-ISIS
images and alignment-method none), it can help with reducing
the size of data on disk and performance.
stereo_rfne
* Added new affine subpixel refinement mode,
``--subpixel-mode 3``. This mode sacrifices the error resistance
of Bayes EM mode in exchange for reduced computation time.
For some data sets this can perform as well as Bayes EM in
about one fifth the time.
stereo_fltr:
* Hole-filling is disabled by default in stereo_fltr. It is
suggested to use instead point2dem's analogous functionality.
It can be re-enabled using ``--enable-fill-holes``.
* Added the option ``--erode-max-size`` to remove isolated blobs.
* Relaxed filtering of disparities, retaining more valid
disparities. Can be adjusted with ``--filter-mode`` and related
parameters.
stereo_tri:
* Added ability to save triangulation error for a DEM as a 3D
North-East-Down vector rather than just its magnitude.
* When acting on map-projected images, handle the case when the
DEM used for map-projection does not completely encompass the
images.
pc_align:
* Read and write CSV files in a wide variety of formats, using
the ``--csv-format`` option.
* Display the translation component of the rigid alignment
transform in the local North-East-Down coordinate system, as
well as the centroid of source points used in alignment.
* Save to disk the convergence history (iteration information).
* Added the ability to explicitly specify the datum semi-axes.
* Bug fix for saving transformed clouds for Moon and Mars.
* More efficient processing of reference and source points
by loading only points in each cloud within a neighborhood
of the long/lat bounding box of the other cloud.
* Make it possible to generate ortho and error images using
point2dem with the transformed clouds output by pc_align.
point2dem:
* Replaced the core algorithm. Instead of sampling the point
cloud surface, which is prone to aliasing, the DEM height at a
given grid point is obtained as a weighted average of heights
of all points in the cloud within search radius of the grid
point, with the weights given by a Gaussian. The cutoff of the
Gaussian can be controlled using the ``--search-radius-factor``
option. The old algorithm is still available (but obsoleted)
using the ``--use-surface-sampling`` option. The new algorithm
makes the ``--fsaa`` option redundant.
* Added the ability to remove outliers by triangulation error,
either automatically (--remove-outliers) or manually, with
given error threshold (--max-valid-triangulation-error).
* Added two algorithms to fill holes in the output DEM and
orthoimage (--hole-fill-mode).
* The way the default DEM spacing is computed was modified,
to make it dependent only on the local distribution of points
in the cloud and robust to outliers.
* Can handle highly noisy input point clouds without spikes in
memory usage and processing time.
* Improved memory usage and performance for large point clouds.
* Bug fix, the DEM was shifted by 1 pixel from true location.
RELEASE 2.3.0, 19 November, 2013
--------------------------------
TOOLS:
- Added pc_align, a tool for aligning point clouds, using the
libpointmatcher library
(https://github.com/ethz-asl/libpointmatcher). Sparse and dense
point clouds are supported, as well as DEMs. Two ICP methods are
supported, point-to-plane and point-to-point. Memory and processing
usage are proportional to the desired number of input points
to use rather than to the overall input data sizes.
- Added lronac2mosaic.py, a tool for merging the LE and RE images
from the LRONAC camera into a single map-projected image. The
output images can be fed into the stereo tool to generate DEMs.
- rpc_mapproject and orthoproject are combined into a single tool
for projecting a camera image onto a DEM for any camera model
supported by Stereo Pipeline. The old orthoproject is kept for
backward compatibility for a while.
GENERAL:
- Stereo Pipeline (almost) daily and fully verified builds for all
platforms are available for the adventurous user
(http://byss.arc.nasa.gov/stereopipeline/daily_build/, which was
later moved to https://github.com/NeoGeographyToolkit/StereoPipeline/releases).
When requesting support, please provide the output of ``stereo --version``.
- The size of Stereo Pipeline output data has been reduced, by up to
40%, particularly point clouds and DEMs are between 30% to 70%
smaller. Better encoding is used, output data is rounded (up to 1
mm), and point clouds are offset and saved as float instead of
double.
- Timeout option added for stereo correlation, preventing
unreasonably long correlation times for certain image tiles.
- Subpixel mosaicking in dg_mosaic uses bilinear interpolation
instead of nearest neighbor avoiding artifacts in certain
situations.
- dg_mosaic can generate a combined RPC model in addition to the
combined DG model. It accepts flags for specifying input and
output nodata values.
- point2dem with the ``--fsaa`` option for reducing aliasing at
low-resolution DEM generation has been improved as to remove the
erosion of valid data close to no-data values.
- Bug fixes for parallel_stereo, point2dem, etc.
RELEASE 2.2.2, 17 MAY 2013
--------------------------
(incremented from 2.2.1 after one more bugfix)
TOOLS:
- stereo_mpi renamed to parallel_stereo and made to work
on any machines with shared storage, rather than just on
supercomputers using Intel's MPI library. Bug fixes for
homography and affine epipolar alignment modes, etc.
- Bug fix for dem_geoid path to geoids, more robust datum
identification.
RELEASE 2.2.0, 6 MAY 2013
-------------------------
GENERAL:
- ISIS headers removed from IsisIO's headers.
- Removed unneeded mutex inside inpaint algorithm.
- Interest point matching and description are parallel now.
- Stereo pprc uses separable convolution for anti-aliasing.
- IsisIO made compliant with ISIS 3.4.3's API.
- Blob consolidation (for inpainting) is now parallel.
- Yamaha RMAX code dropped.
SESSIONS:
- RPC mode can now read Astrium data.
- DG added additional safety checks for XML values.
- DG, ISIS, and RPC now have affineepipolar alignment option.
- All sessions had their API changed. We now use Transform objects
instead of LUTs to reverse mapprojections and alignments.
TOOLS:
- Added dem_geoid, dg_mosaic, and stereo_mpi.
- Added new interest point matching method to stereo.
- Added new DEM seed mode for stereo.
- Point2dem sped up by reducing over rasterization of triangles.
- Added the ``--use-local-homography`` option to stereo_corr. Homography
transform is applied per tile.
- Fix point2dem where for certain projections we were setting K=0.
- Stereo can now operate using command-line arguments only, without
stereo.default.
RELEASE 2.1.0, 8 JANUARY 2013
-----------------------------
GENERAL:
- Added documentation for processing GeoEye, Digital Globe, and Dawn FC data.
- Fixed implementation of internal RANSAC function.
- DEMError has been renamed IntersectionErr. 3D IntersectionErr is
now recordable in local North East Down format.
SESSIONS:
- Added RPC processing session.
- DG sessions now use bicubic interpolation for mapprojection arithmetic.
- Fixed bug in case where DG XML file had single TLC entry.
- DG sessions now applies velocity aberration corrections.
TOOLS:
- Have point2dem use correct nodata value when writing DRGs.
- Fix segfault issue in point2dem due to triangle clipping.
- Hiedr2mosaic python script now supports missing CCD files and
start/stop resume on noproj step for bundle adjustment.
- Max pyramid level used for stereo correlation is configurable with
corr-max-levels option.
- Stereo accepts left-image-crop-win option for processing of
specific image coordinates.
- Stereo_pprc accepts nodata-threshold and nodata-percentage options
for masking (possibly shadows).
- Stereo command should now correctly call secondary executables so
that their dependencies are loaded.
RELEASE 2.0.0, 20 JUNE 2012
---------------------------
GENERAL:
- Modified ASP according to API changes in ISIS 3.4.0.
- Added new interest point matching code. Provides better initial
guess for search range.
- Completely changed the stereo.default format. See stereo.default.example
for an example.
- Completely rewrote the integer correlator for improved speed and less
memory use.
- Relicense code to be Apache 2 licensed instead of NOSA.
SESSIONS:
- Add normalization options to PINHOLE session.
- Added Digital Globe (DG) session. This supports the linearized
linescan camera model that is described in the supporting XML file.
- Deleted KEYPOINT session. PINHOLE essentially does all of that.
EXAMPLES:
- Added DEMError output example for MOC.
- Added jigsaw example for MOC.
- Added HiRISE example dataset.
TOOLS:
- Dropped release of isis_adjust and bundlevis.
- Fix int32 overflow in arithmetic for subsampling in preprocessing.
- Remove Python 2.4 incompatible call in cam2map4stereo.py.
- Speed up point2dem texture access by removing an unnecessary mutex.
- Add earth mode and fix non spherical support in point2dem.
- Added lronac4staged.py.
- Implemented D_sub or seeded integer correlation in stereo_corr.
- Fourth channel of output PC file is now triangulation error.
- Added ``--t_srs`` option to point2dem.
- Added rpc_mapproject tool. This provides an optional mapprojection
step that can be used for DG session.
- Allow IAU2000:* projection options to be used by point2dem.
- No-Data is now colored black in GoodPixelMap.
- Make noproj step in hiedr2mosaic parallel.
RELEASE 1.0.5, 27 OCT 2011
--------------------------
Fixed ASP to work with ISIS 3.3.0's new API changes and library
dependencies.
Enabled parallel writing in Pinhole Session.
TOOLS:
- Fix possible infinite loop in stereo_corr's search range.
- Shutoff rotation invariance in automatic search range for better
quality results. This is possible because the input images are
already aligned.
- Sub image produced by stereo_pprc are now limited to around 8MB.
- Fix disparity_debug to work with integer disparities as well.
- All ASP tools should now have a '--version' option.
- Bug fix point2dem where rasterizer was accessing outside of
allocated memory.
- Speed up mask generation in stereo_pprc by avoiding mutex.
- Speed up hole filling in stereo_fltr by avoiding mutex.
RELEASE 1.0.4, 23 MAY 2011
--------------------------
Added support for CAHVORE in pinhole sessions.
TOOLS:
- Hide GDAL warnings caused by our file integrity checks.
- Mostly added standardized options for settings threads and BigTIFF.
- Have orthoproject return same type as input plus alpha channel.
- Improved edge_masking, speeds up stereo_fltr and stereo_pprc.
- Have cam2map4stereo.py explicitly use ISIS's getkey command.
- Fix and optimized point2dem. Remove caching and improved rendering
times. This should fix BigTIFF problems that have been reported.
- Improve triangulation times slightly when using mapprojected
linescan cameras.
EXAMPLES:
- Added orthoproject, image2qtree, colormap, hillshade examples to MOC.
- Added K10 example dataset.
- Added MER example dataset.
- Added a non-mapprojected MOC example.
- Added CTX example dataset.
DOCS:
- Append notes from Michael about run times.
VISION WORKBENCH benefits:
- Added threaded writing to colormap and hillshade.
- Fix hillshade problems with int16 DEMs.
RELEASE 1.0.3.1, 16 MARCH 2011
------------------------------
Updated documentation and support text files to ensure compatibility
with our third party software.
RELEASE 1.0.3, 11 MARCH 2011
----------------------------
ISISIO:
Make quaternion interaction compliant with VW changes.
SESSIONS:
Correct reading of TSAI camera format.
TOOLS:
- Reduce memory footprint of ISIS_Adjust.
- MOC Example rewritten.
- Improve dash script that loads libraries on startup of application.
VISION WORKBENCH benefits:
- KD-Tree search replace with FLANN, a fast approximate nearest
neighbors. This improves speed of ipmatch, and ip alignment
option in stereo.
- Removed exception catch in Bayesian affine sub-pixel.
- Fixed type deduction problem on 32 bit systems.
- Pyramid Correlator code cleaned up. Minimal speed improvement.
- Fixed Camera Relation Network's memory leak.
- Fix image2qtree normalization and manual geo-positioning.
- Correct random seed call with faster solution.
- Default raster tile size changed to 256.
- Fix deadlocking in loading of ".vwrc", Vision Workbench's settings file.
KNOWN ISSUES
OSX seems to do excessive locking during multi-threaded rendering.
This problem is non-existent in RHEL5 and is still a mystery.
RELEASE 1.0.2, 9 DECEMBER 2010
------------------------------
ISISIO:
- IsisCameraModel support operator<< style printing.
- Correct camera pose return to be consistent with VW.
- Change IsisCameraModel to use shared_ptr to block memory leak.
TOOLS:
- Executables should catch VW and Standard errors and print human readable
responses.
- Stereo is now a Python script that calls multiple executables.
- Change correlation progress bar to track total completion.
- Bundle_Adjust and ISIS_Adjust switch from Euler's to quaternions.
- Bundlevis dropped CAHVOR support. Added progress bar. Converted statistics
with CDFAccumulator.
- Point2dem remove excessive rotation call
- Enforce tile rasterization size to 1024 during integer correlation.
- Select tools should now write their nodata value in the TIFF metadata.
PHOTOMETRYTK
Still unreleased, and still under development.
RELEASE 1.0.1, 24 MAY 2010
--------------------------
CORE:
- Control Network Loader removed and sent to VW's Bundle Adjustment Module.
- Build system can now use Google PerfTools.
- Kakadu was made optional in build system (ISIS 3.2.x uses this).
ISISIO:
- Optimized IsisCameraModel to use IsisInterface. Custom code can be loaded up
for individual camera types so we don't have to run through ISIS's entire
camera model. This allows us not to call GroundMap when the camera is not
mapprojected.
- Added a series of tests for the IsisCameraModel that perform unit tests
with MOC and Galileo.
- Added custom project code for Linescan cameras so not to rely on ISIS's
LineScanCameraGroundMap. This code is a bit more precise.
MPI
Added new optional module called MPI that builds on top of
Boost MPI. This is experimental development code and is not used for
anything in binary release yet.
PHOTOMETRYTK
Added a new optional module called the Photometry Toolkit. This is
experimental development code and is not used for anything released
in the binary yet. This code focuses on future research of massive
mosaics (+100GB) and the ability to perform basic photometric corrections.
SESSIONS
Pinhole session modified to read CMOD files as well.
TOOLS:
- Made orthoproject more robust against odd input georeferences.
- orthoproject's auto scale and crop works again.
- Point2mesh's texture is written to a different file.
- Added aligndem and geodiff, experimental DEM alignment utilities.
- Added a quick experimental DEM profile utility called dem_profile.
- stereo now detects correlation settings automatically using OBALoG and
SGrad1 interest point functions.
- Added cam2map4stereo.py
- Remove excessive serial number calculations in isis_adjust.
- Update isis_adjust to VW's new Bundle Adjustment module for a 2x improvement.
- Stereo should now use LZW compression by default.
- Point2dem and Stereo have added option to use directory other than /tmp for
intermediate files.
- Point2dem now uses MOLA datum instead of its previous truncated value.
- Added safety check to stereo to make sure user is not supplying the
same camera.
- Added point2las, a utility for converting a point cloud to the LAS format.
TESTS
Switched from CXXTests to GTest framework.
RELEASE 1.0.0, 23 OCTOBER, 2009
-------------------------------
CORE:
- OrthoRasterizer.h is subject to change for further VW integration
- MedianFilter.h is untested/unused
- BundleAdjustUtils.* is subject to deletion for integration with
ControlNetworkLoader.*
SESSIONS:
- ISIS Session is the only fully supported session at this time
- Pinhole Session works but has not been tested for this release
- Keypoint/RMAX Session status are unknown
SPICEIO
Subject to deletion in 1.0.1
TOOLS:
- Point2dem can crash rarely. Still investigating.
- rmax* utilities are not working
================================================
FILE: README.rst
================================================
==========================
Ames Stereo Pipeline (ASP)
==========================
.. image:: https://zenodo.org/badge/DOI/latest.svg
:target: https://zenodo.org/badge/latestdoi/714891
.. image:: https://zenodo.org/badge/DOI/Version%203.6.0.svg
:target: https://zenodo.org/records/18064111
.. image:: https://readthedocs.org/projects/stereopipeline/badge/?version=latest
:target: https://stereopipeline.readthedocs.io/en/latest/?badge=latest
:alt: Documentation for latest version
The NASA Ames Stereo Pipeline (ASP) is a suite of free and open source
automated geodesy and stereogrammetry tools designed for processing
stereo images captured from satellites (around Earth and other
planets), robotic rovers, aerial cameras, and historical images, with
and without accurate camera pose information.
ASP produces cartographic products, including digital terrain models
(DTMs, synonymous with digital elevation models, DEMs),
ortho-projected images, 3D models, and bundle-adjusted networks of
cameras. These data products are suitable for science analysis,
mission planning, and public outreach.
* ASP is free software released under the Apache Software License 2.0.
* Documentation: https://stereopipeline.readthedocs.io
Installation
============
Precompiled binaries (for Linux and macOS) are available for the stable releases
and the current development build. Simply download the appropriate distribution
for your operating system, extract, and run the executables in the ``bin``
subdirectory.
- `Stable releases, daily builds, and conda packages
`_
See the `NEWS
`_
for the most recent additions.
To permanently add the ASP executable subdirectory to your PATH,
you can add the following line to your shell configuration (e.g.,
``~/.bashrc``), replacing ``/path/to/StereoPipeline/bin`` with the location
on your filesystem: ``export PATH=${PATH}:/path/to/StereoPipeline/bin``
*ISIS users*: Please install the latest `USGS ISIS
`_ if you would like to process NASA
non-terrestrial images. Users wishing to process Earth images, such as Digital
Globe, satellites with RPC cameras, or various frame/pinhole cameras do not need
to download anything else. If ASP is installed with conda, it will install ISIS
in the same environment as well, though it may not be the latest version.
Documentation
=============
The documentation, in HTML format, is at https://stereopipeline.readthedocs.io.
The documentation includes a gentle introduction to using the Stereo Pipeline,
an entry for each tool, and example processing workflows for many supported
sensors.
The ReStructured Text source files for the documentation are in the `docs`
subdirectory of the ASP distribution.
Support and user community
==========================
All bugs, feature requests, user questions, and general discussion
can be posted on the `ASP support forum
`_.
We also encourage the posting of issues on the `GitHub repo
`_ (most
such items posted on the forum will typically be converted to an
issue there for the developers to work on), as well as pull requests.
Credits
=======
ASP was developed within the Autonomous Systems and Robotics area of
the Intelligent Systems Division at NASA's Ames Research Center. It
leverages the Intelligent Robotics Group's (IRG) extensive experience
developing surface reconstruction and tools for planetary exploration
(e.g., the Mars Pathfinder and Mars Exploration Rover missions, and
rover autonomy). It has also been developed in collaboration with the
Adaptive Control and Evolvable Systems (ACES) group, and draws on
their experience developing computer vision techniques for autonomous
vehicle control systems.
See the `list of contributors
`_.
Citation
--------
In general, please use this reference for the Ames Stereo Pipeline:
Beyer, Ross A., Oleg Alexandrov, and Scott McMichael. 2018. The
Ames Stereo Pipeline: NASA's open source software for deriving and
processing terrain data, Earth and Space Science, 5.
https://doi.org/10.1029/2018EA000409.
If you are using ASP for application to Earth Images, or need a reference
which details the quality of the output, then we suggest also referencing:
Shean, D. E., O. Alexandrov, Z. Moratto, B. E. Smith, I. R. Joughin,
C. C. Porter, Morin, P. J. 2016. An automated, open-source pipeline
for mass production of digital elevation models (DEMs) from very
high-resolution commercial stereo satellite imagery. ISPRS Journal
of Photogrammetry and Remote Sensing, 116.
https://doi.org/10.1016/j.isprsjprs.2016.03.012.
In addition to the recommended citation, we ask that you also cite
the DOI for the specific version of ASP that you used for processing.
Every new release (and daily build) of ASP will have its own unique
DOI, which can be found `here `_.
Additional details for how to cite ASP in your published work can be found
in the ASP documentation.
License
=======
See LICENSE file for the full text of the license that applies to ASP.
Copyright (c) 2009-2025, United States Government as represented by
the Administrator of the National Aeronautics and Space
Administration. All rights reserved.
ASP is licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. You
may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
Third-party libraries
=====================
This distribution may include some bundled third-party software as a
convenience to the user. This software, located in the ``thirdparty/``
directory of the source code release, is not covered by the
above-mentioned distribution agreement or copyright. Binary releases
distribute third party software in both the ``bin`` and ``lib``
directories. See the included documentation for detailed copyright and
license information for any third-party software or check the
`THIRDPARTYLICENSES
`_
file. In addition, various pieces of ASP depend on additional
third-party libraries that the user is expected to have installed.
================================================
FILE: RELEASEGUIDE
================================================
See https://stereopipeline.readthedocs.io/en/latest/building_asp.html.
================================================
FILE: THIRDPARTYLICENSES.rst
================================================
The NASA Ames Stereo Pipeline (ASP) would not be possible without the
use of third party software that is also open source. Our binary release
may include the following software:
======================== ============== =====
Title License URL
======================== ============== =====
armadillo Apache-2.0 https://gitlab.com/conradsnicta/armadillo-code/-/blob/9.900.x/LICENSE.txt
Boost MIT (modified) https://github.com/boostorg/boost/blob/master/LICENSE_1_0.txt
Bullet Zlib https://github.com/bulletphysics/bullet3/blob/master/LICENSE.txt
CERES Solver BSD-3-clause https://github.com/ceres-solver/ceres-solver/blob/master/LICENSE
CURL MIT (modified) https://curl.se/docs/copyright.html
Embree Apache-2.0 https://github.com/embree/embree/blob/master/LICENSE.txt
Eigen MPL-2.0 http://eigen.tuxfamily.org/index.php?title=Main_Page#License
Fast Global Registration MIT https://github.com/intel-isl/FastGlobalRegistration/blob/master/LICENSE
FLANN BSD-3-clause http://people.cs.ubc.ca/~mariusm/index.php/FLANN/FLANN
GDAL MIT https://gdal.org/license.html
GEOS LGPL-2.1 https://trac.osgeo.org/geos/
GFlags BSD-3-clause https://github.com/gflags/gflags/blob/master/COPYING.txt
GLog BSD-3-clause https://github.com/google/glog/blob/master/COPYING
Gmm++ LGPL-2.1 http://getfem.org/gmm.html
GNU Parallel GPL-3.0 https://www.gnu.org/software/parallel/
HDF5 BSD-3-clause https://support.hdfgroup.org/ftp/HDF5/releases/COPYING
HTDP custom https://geodesy.noaa.gov/TOOLS/Htdp/Htdp.shtml
IlmBase BSD-3-clause https://github.com/AcademySoftwareFoundation/openexr/tree/v2.5.5/IlmBase
ImageMagick custom https://github.com/ImageMagick/ImageMagick/blob/master/LICENSE
ISIS public domain https://github.com/USGS-Astrogeology/ISIS3/blob/dev/LICENSE.md
JAMA public domain https://math.nist.gov/tnt/download.html
JHEAD public domain http://www.sentex.net/~mwandel/jhead/
LAPACK BSD-3-clause https://github.com/Reference-LAPACK/lapack/blob/master/LICENSE
PDAL BSD-3-clause https://github.com/PDAL/PDAL/blob/master/LICENSE.txt
LibGeoTIFF combination https://github.com/OSGeo/libgeotiff/blob/master/libgeotiff/LICENSE
LibPNG custom http://www.libpng.org/pub/png/src/libpng-LICENSE.txt
libnabo BSD-3-clause https://github.com/ethz-asl/libnabo/blob/master/README.md
libpointmatcher BSD-3-clause https://github.com/ethz-asl/libpointmatcher/blob/master/README.md
nanoflann BSD-2-clause https://github.com/jlblancoc/nanoflann/blob/master/COPYING
OBALoG BSD-3-clause http://krex.k-state.edu/dspace/handle/2097/3651
OpenCV Apache-2.0 https://github.com/opencv/opencv/blob/master/LICENSE
OpenEXR BSD-3-clause https://github.com/AcademySoftwareFoundation/openexr/blob/master/LICENSE.md
OpenJPEG BSD-2-clause https://github.com/uclouvain/openjpeg/blob/master/LICENSE
OpenSSL Apache-2.0 https://github.com/openssl/openssl/blob/master/LICENSE.txt
PCL BSD-3-clause https://github.com/PointCloudLibrary/pcl/blob/master/LICENSE.txt
PROJ MIT https://proj.org/about.html#license
Protocol Buffers BSD-3-clause https://github.com/protocolbuffers/protobuf/blob/master/LICENSE
Qt LGPL-3.0 https://www.qt.io/licensing/
Qwt LGPL-2.1 https://qwt.sourceforge.io/qwtlicense.html
SPICE custom https://naif.jpl.nasa.gov/naif/rules.html
SuiteSparse GPL-2.0 https://github.com/DrTimothyAldenDavis/SuiteSparse/blob/master/LICENSE.txt
SuperLU BSD-3-clause https://github.com/xiaoyeli/superlu/blob/master/License.txt
Theia BSD-3-clause https://github.com/sweeneychris/TheiaSfM/blob/master/license.txt
TNT public domain https://math.nist.gov/tnt/download.html
Xerces C Apache-2.0 https://github.com/apache/xerces-c/blob/master/LICENSE
yaml-cpp MIT https://github.com/jbeder/yaml-cpp/blob/master/LICENSE
======================== ============== =====
================================================
FILE: cmake/FindCairo.cmake
================================================
# TODO: This was copied from here: https://github.com/alacarte-maps/alacarte/tree/master/cmake
# Which uses the Affero General Public License.
# If we end up using this make sure that is ok!
# - Try to find the CAIRO library
# Once done this will define
#
# CAIRO_ROOT_DIR - Set this variable to the root installation of CAIRO
#
# Read-Only variables:
# CAIRO_FOUND - system has the CAIRO library
# CAIRO_INCLUDE_DIR - the CAIRO include directory
# CAIRO_LIBRARIES - The libraries needed to use CAIRO
# CAIRO_VERSION - This is set to $major.$minor.$revision (eg. 0.9.8)
#
# Copyright (c) 2008 Joshua L. Blocher
# Copyright (c) 2012 Dmitry Baryshnikov
# Copyright (c) 2013 Michael Pavlyshko
#
# Distributed under the OSI-approved BSD License
#
# On non-Windows hosts, query pkg-config first. On success this provides
# the installed version string plus the _CAIRO_INCLUDEDIR/_CAIRO_LIBDIR
# hints that are consumed by the find_path/find_library calls below.
if (NOT WIN32)
find_package(PkgConfig)
if (PKG_CONFIG_FOUND)
pkg_check_modules(_CAIRO cairo)
SET(CAIRO_VERSION ${_CAIRO_VERSION})
# Split the dotted version string "major.minor.revision" into numbers.
# NOTE(review): the destination variable names are shifted by one slot:
# \1 (the major number) is stored in CAIRO_VERSION_V, \2 (minor) in
# CAIRO_VERSION_MAJOR, and \3 (revision) in CAIRO_VERSION_MINOR.
# Left as-is because downstream code may rely on these exact names --
# confirm all consumers before renaming.
STRING (REGEX REPLACE "([0-9]+).([0-9]+).([0-9]+)" "\\1" num "${CAIRO_VERSION}")
MATH (EXPR CAIRO_VERSION_V "${num}")
STRING (REGEX REPLACE "([0-9]+).([0-9]+).([0-9]+)" "\\2" num "${CAIRO_VERSION}")
MATH (EXPR CAIRO_VERSION_MAJOR "${num}")
STRING (REGEX REPLACE "([0-9]+).([0-9]+).([0-9]+)" "\\3" num "${CAIRO_VERSION}")
MATH (EXPR CAIRO_VERSION_MINOR "${num}")
endif (PKG_CONFIG_FOUND)
endif (NOT WIN32)
# Candidate root locations: the CAIRO environment variable, the CMake
# cross-compilation root, and the user-settable CAIRO_ROOT_DIR variable.
SET(_CAIRO_ROOT_HINTS
$ENV{CAIRO}
${CMAKE_FIND_ROOT_PATH}
${CAIRO_ROOT_DIR}
)
SET(_CAIRO_ROOT_PATHS
${CMAKE_FIND_ROOT_PATH}
$ENV{CAIRO}/src
/usr
/usr/local
)
# Pre-packaged HINTS/PATHS keyword+value list; it is expanded inline in
# the find_path/find_library calls, where the embedded keywords take effect.
SET(_CAIRO_ROOT_HINTS_AND_PATHS
HINTS ${_CAIRO_ROOT_HINTS}
PATHS ${_CAIRO_ROOT_PATHS}
)
FIND_PATH(CAIRO_INCLUDE_DIR
NAMES
cairo.h
HINTS
${_CAIRO_INCLUDEDIR}
${_CAIRO_ROOT_HINTS_AND_PATHS}
PATH_SUFFIXES
include
"include/cairo"
)
FIND_LIBRARY(CAIRO_LIBRARY
NAMES
cairo
HINTS
${_CAIRO_LIBDIR}
${_CAIRO_ROOT_HINTS_AND_PATHS}
PATH_SUFFIXES
"lib"
"local/lib"
)
MARK_AS_ADVANCED(CAIRO_LIBRARY)
SET(CAIRO_LIBRARIES ${CAIRO_LIBRARY})
include(FindPackageHandleStandardArgs)
# Sets CAIRO_FOUND when both the library and the include dir were located.
find_package_handle_standard_args(CAIRO "Could NOT find CAIRO, try to set the path to CAIRO root folder in the system variable CAIRO"
CAIRO_LIBRARIES
CAIRO_INCLUDE_DIR
)
MARK_AS_ADVANCED(CAIRO_INCLUDE_DIR CAIRO_LIBRARIES)
================================================
FILE: cmake/FindCairomm.cmake
================================================
# TODO: This was copied from here: https://github.com/alacarte-maps/alacarte/tree/master/cmake
# Which uses the Affero General Public License.
# If we end up using this make sure that is ok!
# - Try to find CAIROMM
# Once done this will define
#
# CAIROMM_ROOT_DIR - Set this variable to the root installation of CAIROMM
# CAIROMM_FOUND - system has CAIROMM
# CAIROMM_INCLUDE_DIR - the CAIROMM include directory
# CAIROMM_LIBRARIES - Link these to use CAIROMM
#
# Copyright (c) 2008 Joshua L. Blocher
# Copyright (c) 2012 Dmitry Baryshnikov
# Copyright (c) 2013 Michael Pavlyshko
#
# Distributed under the OSI-approved BSD License
#
# On non-Windows hosts, query pkg-config for search hints
# (_CAIROMM_INCLUDEDIR/_CAIROMM_LIBDIR, consumed below).
if (NOT WIN32)
find_package(PkgConfig)
if (PKG_CONFIG_FOUND)
pkg_check_modules(_CAIROMM cairomm-1.0)
endif (PKG_CONFIG_FOUND)
endif (NOT WIN32)
# Candidate root locations: the CAIROMM environment variable, the CMake
# cross-compilation root, and the user-settable CAIROMM_ROOT_DIR variable.
SET(_CAIROMM_ROOT_HINTS
$ENV{CAIROMM}
${CMAKE_FIND_ROOT_PATH}
${CAIROMM_ROOT_DIR}
)
SET(_CAIROMM_ROOT_PATHS
${CMAKE_FIND_ROOT_PATH}
$ENV{CAIROMM}/src
/usr
/usr/local
)
# Pre-packaged HINTS/PATHS keyword+value list; expanded inline in the
# find_path/find_library calls, where the embedded keywords take effect.
SET(_CAIROMM_ROOT_HINTS_AND_PATHS
HINTS ${_CAIROMM_ROOT_HINTS}
PATHS ${_CAIROMM_ROOT_PATHS}
)
FIND_PATH(CAIROMM_INCLUDE_DIR
NAMES
cairomm/cairomm.h
HINTS
${_CAIROMM_INCLUDEDIR}
${_CAIROMM_ROOT_HINTS_AND_PATHS}
PATH_SUFFIXES
include
"include/cairomm-1.0"
)
# Bug fix: also search the pkg-config library directory. Previously the
# ${_CAIROMM_LIBDIR} hint was missing here (unlike the matching call in
# FindCairo.cmake), so a cairomm installed in a non-default pkg-config
# prefix could be missed even though pkg-config had located it.
FIND_LIBRARY(CAIROMM_LIBRARY
NAMES
cairomm
cairomm-1.0
HINTS
${_CAIROMM_LIBDIR}
${_CAIROMM_ROOT_HINTS_AND_PATHS}
PATH_SUFFIXES
"lib"
"local/lib"
)
SET(CAIROMM_LIBRARIES ${CAIROMM_LIBRARY})
include(FindPackageHandleStandardArgs)
# Sets CAIROMM_FOUND when both the library and the include dir were located.
find_package_handle_standard_args(CAIROMM "Could NOT find CAIROMM, try to set the path to CAIROMM root folder in the system variable CAIROMM"
CAIROMM_LIBRARIES
CAIROMM_INCLUDE_DIR
)
MARK_AS_ADVANCED(CAIROMM_INCLUDE_DIR CAIROMM_LIBRARY)
================================================
FILE: cmake/FindFreeType.cmake
================================================
# - Locate FreeType library
# This module defines
# FREETYPE_LIBRARIES, the library to link against
# FREETYPE_FOUND, if false, do not try to link to FREETYPE
# FREETYPE_INCLUDE_DIRS, where to find headers.
# This is the concatenation of the paths:
# FREETYPE_INCLUDE_DIR_ft2build
# FREETYPE_INCLUDE_DIR_freetype2
#
# $FREETYPE_DIR is an environment variable that would
# correspond to the ./configure --prefix=$FREETYPE_DIR
# used in building FREETYPE.
#=============================================================================
# Copyright 2007-2009 Kitware, Inc.
#
# Distributed under the OSI-approved BSD License (the "License");
# see accompanying file Copyright.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================
# (To distributed this file outside of CMake, substitute the full
# License text for the above reference.)
# Created by Eric Wing.
# Modifications by Alexander Neundorf.
# Ugh, FreeType seems to use some #include trickery which
# makes this harder than it should be. It looks like they
# put ft2build.h in a common/easier-to-find location which
# then contains a #include to a more specific header in a
# more specific location (#include ).
# Then from there, they need to set a bunch of #define's
# so you can do something like:
# #include FT_FREETYPE_H
# Unfortunately, using CMake's mechanisms like INCLUDE_DIRECTORIES()
# wants explicit full paths and this trickery doesn't work too well.
# I'm going to attempt to cut out the middleman and hope
# everything still works.
# Locate the directory holding the umbrella header ft2build.h.
FIND_PATH(FREETYPE_INCLUDE_DIR_ft2build ft2build.h
HINTS
$ENV{FREETYPE_DIR}
PATH_SUFFIXES include
PATHS
/usr/local/X11R6/include
/usr/local/X11/include
/usr/X11/include
/sw/include
/opt/local/include
/usr/freeware/include
"C:/libs/freetype/include"
)
# Locate the freetype2 directory holding the detailed headers that
# ft2build.h forwards to (freetype/config/ftheader.h).
FIND_PATH(FREETYPE_INCLUDE_DIR_freetype2 freetype/config/ftheader.h
HINTS
$ENV{FREETYPE_DIR}/include/freetype2
PATHS
/usr/local/X11R6/include
/usr/local/X11/include
/usr/X11/include
/sw/include
/opt/local/include
/usr/freeware/include
"C:/libs/freetype/include"
PATH_SUFFIXES freetype2
)
FIND_LIBRARY(FREETYPE_LIBRARY
NAMES freetype libfreetype freetype219
HINTS
$ENV{FREETYPE_DIR}
PATH_SUFFIXES lib64 lib
PATHS
/usr/local/X11R6
/usr/local/X11
/usr/X11
/sw
/usr/freeware
"C:/libs/freetype/lib"
)
GET_FILENAME_COMPONENT( FREETYPE_LIBRARY_DIR ${FREETYPE_LIBRARY} PATH )
# On Windows the DLL typically lives next to the import library, in ../bin.
if (WIN32)
list(APPEND FREETYPE_LIBRARY_DIR "${FREETYPE_LIBRARY_DIR}/../bin" )
endif(WIN32)
# set the user variables
IF(FREETYPE_INCLUDE_DIR_ft2build AND FREETYPE_INCLUDE_DIR_freetype2)
SET(FREETYPE_INCLUDE_DIRS "${FREETYPE_INCLUDE_DIR_ft2build};${FREETYPE_INCLUDE_DIR_freetype2}")
ENDIF(FREETYPE_INCLUDE_DIR_ft2build AND FREETYPE_INCLUDE_DIR_freetype2)
SET(FREETYPE_LIBRARIES "${FREETYPE_LIBRARY}")
# handle the QUIETLY and REQUIRED arguments and set FREETYPE_FOUND to TRUE if
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(Freetype DEFAULT_MSG FREETYPE_LIBRARY FREETYPE_INCLUDE_DIRS)
MARK_AS_ADVANCED(FREETYPE_LIBRARY FREETYPE_INCLUDE_DIR_freetype2 FREETYPE_INCLUDE_DIR_ft2build)
================================================
FILE: cmake/FindSigC++.cmake
================================================
# Copied from list post here: https://mail.gnome.org/archives/libsigc-list/2012-February/msg00005.html
# Had to add search path "lib64/sigc++-2.0/include"
# - Try to find SigC++-2.0
# Once done, this will define
#
# SigC++_FOUND - system has SigC++
# SigC++_INCLUDE_DIRS - the SigC++ include directories
# SigC++_LIBRARIES - link these to use SigC++
include(LibFindMacros)
# Use pkg-config to get hints about paths
libfind_pkg_check_modules(SigC++_PKGCONF sigc++-2.0)
# Main include dir
find_path(SigC++_INCLUDE_DIR
NAMES sigc++/sigc++.h
PATHS ${SigC++_PKGCONF_INCLUDE_DIRS}
PATH_SUFFIXES sigc++-2.0
)
# Glib-related libraries also use a separate config header, which is in lib dir
find_path(SigC++Config_INCLUDE_DIR
NAMES sigc++config.h
PATHS ${SigC++_PKGCONF_INCLUDE_DIRS} /usr
PATH_SUFFIXES lib/sigc++-2.0/include lib64/sigc++-2.0/include
)
# Report the detected config-header location as a STATUS message. The
# previous untagged message() was leftover debugging output that printed
# unconditionally (and, being untagged, went to the error stream).
message(STATUS "SigC++ config include dir = ${SigC++Config_INCLUDE_DIR}")
libfind_library(SigC++ sigc 2.0)
# Set the include dir variables and the libraries and let libfind_process do the rest.
# NOTE: Singular variables for this library, plural for libraries that this lib depends on.
set(SigC++_PROCESS_INCLUDES SigC++_INCLUDE_DIR SigC++Config_INCLUDE_DIR)
set(SigC++_PROCESS_LIBS SigC++_LIBRARY)
libfind_process(SigC++)
================================================
FILE: cmake/LibFindMacros.cmake
================================================
# Version 1.0 (2013-04-12)
# Public Domain, originally written by Lasse Kärkkäinen
# Published at http://www.cmake.org/Wiki/CMake:How_To_Find_Libraries
# If you improve the script, please modify the aforementioned wiki page because
# I no longer maintain my scripts (hosted as static files at zi.fi). Feel free
# to remove this entire header if you use real version control instead.
# Changelog:
# 2013-04-12 Added version number (1.0) and this header, no other changes
# 2009-10-08 Originally published
# Works like find_package(), but forwards the "REQUIRED" and "QUIET"
# options that the current package was invoked with. The first parameter
# is the current package's prefix; the remaining arguments (the new
# package name etc.) are handed to find_package() unchanged.
macro (libfind_package PREFIX)
  set(LIBFIND_PACKAGE_ARGS ${ARGN})
  if (${PREFIX}_FIND_QUIETLY)
    list(APPEND LIBFIND_PACKAGE_ARGS QUIET)
  endif ()
  if (${PREFIX}_FIND_REQUIRED)
    list(APPEND LIBFIND_PACKAGE_ARGS REQUIRED)
  endif ()
  find_package(${LIBFIND_PACKAGE_ARGS})
endmacro ()
# pkg-config helper that works on both old and new CMake: version 2.4
# only has the deprecated UsePkgConfig module, while 2.6+ provides
# pkg_check_modules. Call it exactly like pkg_check_modules; no prior
# setup is required.
macro (libfind_pkg_check_modules PREFIX PKGNAME)
  if (${CMAKE_MAJOR_VERSION} EQUAL 2 AND ${CMAKE_MINOR_VERSION} EQUAL 4)
    # Ancient CMake: fall back to the old pkgconfig() command.
    include(UsePkgConfig)
    pkgconfig(${PKGNAME} ${PREFIX}_INCLUDE_DIRS ${PREFIX}_LIBRARY_DIRS ${PREFIX}_LDFLAGS ${PREFIX}_CFLAGS)
  else ()
    # Modern path: only query when the pkg-config tool itself was found.
    find_package(PkgConfig)
    if (PKG_CONFIG_FOUND)
      pkg_check_modules(${PREFIX} ${PKGNAME})
    endif ()
  endif ()
endmacro ()
# Do the final processing once the paths have been detected.
# If include dirs are needed, ${PREFIX}_PROCESS_INCLUDES should be set to contain
# all the variables, each of which contain one include directory.
# Ditto for ${PREFIX}_PROCESS_LIBS and library files.
# Will set ${PREFIX}_FOUND, ${PREFIX}_INCLUDE_DIRS and ${PREFIX}_LIBRARIES.
# Also handles errors in case library detection was required, etc.
macro (libfind_process PREFIX)
# Skip processing if already processed during this run
if (NOT ${PREFIX}_FOUND)
# Start with the assumption that the library was found
set (${PREFIX}_FOUND TRUE)
# Process all includes and set _FOUND to false if any are missing
# (each "i" is the NAME of a variable holding one include directory).
foreach (i ${${PREFIX}_PROCESS_INCLUDES})
if (${i})
# Variable is set: accumulate its value and hide it in the cache UI.
set (${PREFIX}_INCLUDE_DIRS ${${PREFIX}_INCLUDE_DIRS} ${${i}})
mark_as_advanced(${i})
else (${i})
set (${PREFIX}_FOUND FALSE)
endif (${i})
endforeach (i)
# Process all libraries and set _FOUND to false if any are missing
foreach (i ${${PREFIX}_PROCESS_LIBS})
if (${i})
set (${PREFIX}_LIBRARIES ${${PREFIX}_LIBRARIES} ${${i}})
mark_as_advanced(${i})
else (${i})
set (${PREFIX}_FOUND FALSE)
endif (${i})
endforeach (i)
# Print message and/or exit on fatal error
if (${PREFIX}_FOUND)
if (NOT ${PREFIX}_FIND_QUIETLY)
message (STATUS "Found ${PREFIX} ${${PREFIX}_VERSION}")
endif (NOT ${PREFIX}_FIND_QUIETLY)
else (${PREFIX}_FOUND)
if (${PREFIX}_FIND_REQUIRED)
# Dump every input variable so the user can see which one is missing.
foreach (i ${${PREFIX}_PROCESS_INCLUDES} ${${PREFIX}_PROCESS_LIBS})
message("${i}=${${i}}")
endforeach (i)
message (FATAL_ERROR "Required library ${PREFIX} NOT FOUND.\nInstall the library (dev version) and try again. If the library is already installed, use ccmake to set the missing variables manually.")
endif (${PREFIX}_FIND_REQUIRED)
endif (${PREFIX}_FOUND)
endif (NOT ${PREFIX}_FOUND)
endmacro (libfind_process)
# Search for a library named <basename>, optionally with a version suffix
# (optional third argument), storing the result in ${PREFIX}_LIBRARY.
# On MSVC 8/9 a -vc80/-vc90 tag is inserted into the candidate names.
# NOTE(review): when a version is supplied, the plain unversioned name is
# NOT among the candidates -- only "<basename><tag>-<version>" plus the
# same with dots replaced by underscores. This mirrors the upstream
# LibFindMacros behavior; confirm before changing.
macro(libfind_library PREFIX basename)
set(TMP "")
if(MSVC80)
set(TMP -vc80)
endif(MSVC80)
if(MSVC90)
set(TMP -vc90)
endif(MSVC90)
set(${PREFIX}_LIBNAMES ${basename}${TMP})
if(${ARGC} GREATER 2)
# Versioned name (e.g. sigc-2.0); also try dots converted to underscores.
set(${PREFIX}_LIBNAMES ${basename}${TMP}-${ARGV2})
string(REGEX REPLACE "\\." "_" TMP ${${PREFIX}_LIBNAMES})
set(${PREFIX}_LIBNAMES ${${PREFIX}_LIBNAMES} ${TMP})
endif(${ARGC} GREATER 2)
find_library(${PREFIX}_LIBRARY
NAMES ${${PREFIX}_LIBNAMES}
PATHS ${${PREFIX}_PKGCONF_LIBRARY_DIRS}
)
endmacro(libfind_library)
================================================
FILE: cmake/Utilities.cmake
================================================
# This file contains functions used in other parts of the project.
# Collect the bare file names of all C/C++ sources and headers found in
# ${CMAKE_CURRENT_SOURCE_DIR}/<relativePath> and return them through
# <outputFileList> in the caller's scope.
function(get_all_source_files relativePath outputFileList)
  set(dirPath "${CMAKE_CURRENT_SOURCE_DIR}/${relativePath}")
  # Match every recognized header/source extension in the directory.
  file(GLOB matchedPaths
       "${dirPath}/*.h"   "${dirPath}/*.hpp"
       "${dirPath}/*.cc"  "${dirPath}/*.cpp"
       "${dirPath}/*.cxx" "${dirPath}/*.tcc")
  set(nameList)
  foreach(fullPath ${matchedPaths})
    # Strip the directory part, keeping only "name.ext".
    get_filename_component(bareName ${fullPath} NAME)
    list(APPEND nameList ${bareName})
  endforeach()
  set(${outputFileList} ${nameList} PARENT_SCOPE)
endfunction()
# Look for a library dependency, starting with the given search
# folder. If the header files are in a subfolder of "include",
# specify that one in "inc_subfolder". The value of
# LIB_SUBDIR is used for CSM to find the library.
# Sets (and exports to the parent scope):
#   <name>_FOUND, <name>_LIBRARIES, <name>_INCLUDE_DIR, ASP_HAVE_PKG_<name>
function(find_external_library name search_folder inc_subfolder libNameList required)
  # Define the variable names we will create
  set(FOUND_NAME "${name}_FOUND")
  set(LIB_NAME "${name}_LIBRARIES")
  set(INC_NAME "${name}_INCLUDE_DIR")
  set(ASP_NAME "ASP_HAVE_PKG_${name}") # TODO: Remove VW/ASP name!
  if(search_folder)
    set(${FOUND_NAME} 1)
    set(ext ".so")
    if (APPLE)
      set(ext ".dylib")
    endif()
    # Add each lib file that was provided.
    # BUGFIX: was set(${${LIB_NAME}} ""), a double dereference which cleared
    # an unrelated variable instead of initializing the library list.
    set(${LIB_NAME} "")
    foreach(lib ${libNameList})
      set(FULL_NAME "lib${lib}${ext}")
      set(FULL_PATH "${search_folder}/lib/${LIB_SUBDIR}${FULL_NAME}")
      if (NOT EXISTS ${FULL_PATH})
        # The library may carry a version suffix (e.g. libfoo.so.1); glob for it.
        file(GLOB LIB_FILES ${FULL_PATH}*)
        # BUGFIX: guard against an empty glob result; list(GET) on an
        # empty list is a hard configure-time error.
        list(LENGTH LIB_FILES NUM_LIB_FILES)
        if (NUM_LIB_FILES GREATER 0)
          list(GET LIB_FILES 0 FULL_PATH) # use the first match
        else()
          message(STATUS "Missing library file: ${FULL_PATH}")
          set(${FOUND_NAME} 0)
          continue()
        endif()
      endif()
      set(${LIB_NAME} ${${LIB_NAME}} ${FULL_PATH})
    endforeach()
    set(${INC_NAME} ${search_folder}/include/${inc_subfolder})
    string(REGEX REPLACE "/$" "" ${INC_NAME} ${${INC_NAME}}) # rm trailing /
  else()
    # TODO: Provide effective findX.cmake files to handle these.
    find_package(${name} REQUIRED)
  endif()
  # Check and display our results
  if(${FOUND_NAME})
    set(${ASP_NAME} 1)
    message(STATUS "Found include files for ${name} at ${${INC_NAME}}")
    include_directories("${${INC_NAME}}")
  else()
    if (${required})
      message( FATAL_ERROR "Failed to find REQUIRED library ${name}." )
    else()
      message(STATUS "Failed to find ${name}")
    endif()
  endif()
  # Pass the results back up to the parent function
  set(${FOUND_NAME} ${${FOUND_NAME}} PARENT_SCOPE)
  set(${LIB_NAME} ${${LIB_NAME}} PARENT_SCOPE)
  set(${INC_NAME} ${${INC_NAME}} PARENT_SCOPE)
  set(${ASP_NAME} ${${ASP_NAME}} PARENT_SCOPE)
  message(STATUS "Found libraries for ${name} at ${${LIB_NAME}}")
endfunction(find_external_library)
# Define a custom make target that will run all tests with normal gtest output.
# - Normally you can run 'make test' to run all tests but the output is brief.
# - With this you can run 'make gtest_all' to run all tests with more output.
# Guarded so repeated inclusion of this file does not redefine the target.
if (NOT TARGET gtest_all)
add_custom_target(gtest_all)
endif()
# Call this function once for each gtest target.
# Creates "<test_target>_runtest", which builds the test binary and then
# executes it from the top of the build tree, and hooks it into gtest_all.
macro(add_to_custom_test_target test_target)
add_custom_target(${test_target}_runtest
COMMAND ${test_target} #cmake 2.6 required
DEPENDS ${test_target}
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}")
add_dependencies(gtest_all ${test_target}_runtest)
endmacro()
## Add the shared precompiled header to the current target.
## - Build it for the first target, then reuse it for all later targets.
#function(add_precompiled_header_to_target target)
# #set(PCH_PATH "${CMAKE_HOME_DIRECTORY}/src/vw/stdafx.h")
# set(PCH_PATH "../stdafx.h")
# message("PCH_PATH = ${PCH_PATH}")
# message("target = ${target}")
# get_property(pchFirstLibrary GLOBAL PROPERTY storedPchFirstLibrary)
# if(${pchFirstLibrary} STREQUAL "NA")
# # First time this is called, don't reuse the PCH compilation.
# set_property(GLOBAL PROPERTY storedPchFirstLibrary ${target})
# target_precompiled_header(${target} ${PCH_PATH})
# else()
# target_precompiled_header(${target} ${PCH_PATH} REUSE ${pchFirstLibrary})
# endif()
#endfunction(add_precompiled_header_to_target)
# Function to add a library to the project.
# - This is called in each library folder directory.
# Arguments:
#   libName        - name of the shared library target to create.
#   fileList       - source files; headers among them are also installed.
#   testFileList   - gtest source files located under ./tests/.
#   dependencyList - libraries to link the target (and its tests) against.
function(add_library_wrapper libName fileList testFileList dependencyList)
  # Set up the library
  add_library(${libName} SHARED ${fileList})
  set_target_properties(${libName} PROPERTIES LINKER_LANGUAGE CXX)
  target_link_libraries(${libName} ${dependencyList})
  install(TARGETS ${libName} DESTINATION lib)
  # Set all the header files to be installed to the include directory
  foreach(f ${fileList})
    get_filename_component(extension ${f} EXT) # Get file extension
    string(TOLOWER "${extension}" extensionLower)
    if(extensionLower STREQUAL ".h" OR extensionLower STREQUAL ".hpp" OR extensionLower STREQUAL ".tcc")
      set(fullPath "${CMAKE_CURRENT_SOURCE_DIR}/${f}")
      # TODO(oalexan1): This is very fragile code because "asp" can
      # also match build_asp and what not.
      # That is why here we do the regex twice.
      # Need to replace with code which simply installs the "asp"
      # subdir.
      STRING(REGEX MATCH "/asp/.*/" dir1 ${fullPath})
      STRING(REGEX MATCH "asp/.*/" dir ${dir1})
      INSTALL(FILES ${f} DESTINATION include/${dir})
    endif()
  endforeach(f)
  # Add unit test for each test file given
  set(TEST_MAIN_PATH "${CMAKE_SOURCE_DIR}/src/test/test_main.cc")
  foreach(f ${testFileList})
    get_filename_component(filename ${f} NAME_WE) # File name without extension
    # BUGFIX: the executable name must embed the test file's base name.
    # It previously contained a literal "$(unknown)", so every test file
    # of a library produced the same colliding target name.
    set(executableName "${libName}_${filename}")
    # Add executable with shared main file and this file
    # - This executable should not be built unless running tests.
    add_executable(${executableName} EXCLUDE_FROM_ALL ${TEST_MAIN_PATH} ./tests/${f})
    # Link test executable against current library, gtest, and gtest_main
    target_link_libraries(${executableName} gtest gtest_main ${dependencyList} ${libName})
    target_compile_definitions(${executableName} PRIVATE GTEST_USE_OWN_TR1_TUPLE=1)
    # These paths let each test locate its data directory at runtime.
    target_compile_definitions(${executableName} PRIVATE "TEST_OBJDIR=\"${CMAKE_CURRENT_SOURCE_DIR}/tests\"")
    target_compile_definitions(${executableName} PRIVATE "TEST_SRCDIR=\"${CMAKE_CURRENT_SOURCE_DIR}/tests\"")
    add_test(${executableName} ${executableName})
    add_to_custom_test_target(${executableName}) # Add to the verbose test make target.
  endforeach(f)
endfunction( add_library_wrapper )
================================================
FILE: cmake/linux_cross_toolchain.cmake
================================================
# Cross-compile toolchain for building on Mac ARM64 targeting Linux x86_64.
#
# Both VisionWorkbench and StereoPipeline have a copy of this file. Keep them
# in sync when making changes.
#
# Prerequisites:
# - conda-forge clang 16+ with -fopenmp support (in MAC_ASP_DEPS env)
# - lld (LLVM linker) in MAC_ASP_DEPS env
# - Linux deps prefix with sysroot and GCC 12.4.0 libraries
#
# Usage (VW, from build_linux/):
# cmake .. \
# -DCMAKE_TOOLCHAIN_FILE=../cmake/linux_cross_toolchain.cmake \
# -DLINUX_DEPS_PREFIX=$HOME/miniconda3/envs/asp_deps_linux \
# -DMAC_ASP_DEPS=$HOME/anaconda3/envs/asp_deps \
# -DASP_DEPS_DIR=$HOME/miniconda3/envs/asp_deps_linux \
# -DCMAKE_INSTALL_PREFIX=$HOME/projects/StereoPipeline/install_linux
#
# Usage (ASP, from build_linux/):
# cmake .. \
# -DCMAKE_TOOLCHAIN_FILE=../cmake/linux_cross_toolchain.cmake \
# -DLINUX_DEPS_PREFIX=$HOME/miniconda3/envs/asp_deps_linux \
# -DMAC_ASP_DEPS=$HOME/anaconda3/envs/asp_deps \
# -DASP_DEPS_DIR=$HOME/miniconda3/envs/asp_deps_linux \
# -DVISIONWORKBENCH_INSTALL_DIR=$HOME/projects/StereoPipeline/install_linux \
# -DCMAKE_INSTALL_PREFIX=$HOME/projects/StereoPipeline/install_linux \
# -DOpenMP_C_FLAGS=-fopenmp \
# -DOpenMP_CXX_FLAGS=-fopenmp \
# -DOpenMP_C_LIB_NAMES=omp \
# -DOpenMP_CXX_LIB_NAMES=omp \
# -DOpenMP_omp_LIBRARY=${LINUX_DEPS_PREFIX}/lib/libomp.so
# Propagate these variables to try_compile() projects. Without this, CMake's
# ABI detection re-invokes this toolchain file but loses the -D variables.
list(APPEND CMAKE_TRY_COMPILE_PLATFORM_VARIABLES LINUX_DEPS_PREFIX MAC_ASP_DEPS)
# Validate required variables.
if(NOT DEFINED LINUX_DEPS_PREFIX)
message(FATAL_ERROR "Set -DLINUX_DEPS_PREFIX=/path/to/asp_deps_linux")
endif()
if(NOT DEFINED MAC_ASP_DEPS)
message(FATAL_ERROR "Set -DMAC_ASP_DEPS=/path/to/mac/asp_deps")
endif()
# Derived paths.
set(CROSS_SYSROOT "${LINUX_DEPS_PREFIX}/x86_64-conda-linux-gnu/sysroot")
# Auto-detect GCC version in the linux prefix.
# NOTE(review): list(GET) errors out if the glob matches nothing, and with
# multiple installed GCC versions the first (arbitrary) match wins --
# presumably the prefix always contains exactly one version dir; confirm.
file(GLOB GCC_VERSION_DIRS "${LINUX_DEPS_PREFIX}/lib/gcc/x86_64-conda-linux-gnu/*")
list(GET GCC_VERSION_DIRS 0 GCC_LIB)
message(STATUS "Cross-compile: GCC lib dir = ${GCC_LIB}")
set(GCC_INC "${GCC_LIB}/include/c++")
# Target platform.
set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR x86_64)
# Compilers (conda-forge clang from the Mac env).
set(CMAKE_C_COMPILER "${MAC_ASP_DEPS}/bin/clang")
set(CMAKE_CXX_COMPILER "${MAC_ASP_DEPS}/bin/clang++")
# Compiler flags. Pass --sysroot directly in flags instead of using
# CMAKE_SYSROOT, which strips path prefixes and corrupts other paths.
# -B points clang at the cross GCC's crt objects; ld.lld from the Mac env
# is used as the linker since Apple's ld cannot produce ELF.
set(CROSS_COMMON_FLAGS
"--target=x86_64-unknown-linux-gnu \
--sysroot=${CROSS_SYSROOT} \
--gcc-toolchain=${LINUX_DEPS_PREFIX} \
-B${GCC_LIB} \
-fuse-ld=${MAC_ASP_DEPS}/bin/ld.lld \
-I${CROSS_SYSROOT}/usr/include \
-L${GCC_LIB} \
-L${LINUX_DEPS_PREFIX}/lib")
set(CMAKE_C_FLAGS_INIT "${CROSS_COMMON_FLAGS}")
# C++ additionally needs the cross toolchain's libstdc++ headers.
set(CMAKE_CXX_FLAGS_INIT
"${CROSS_COMMON_FLAGS} \
-isystem ${GCC_INC} \
-isystem ${GCC_INC}/x86_64-conda-linux-gnu \
-Wno-enum-constexpr-conversion")
# Linker flags.
set(CMAKE_EXE_LINKER_FLAGS_INIT "-L${GCC_LIB} -L${LINUX_DEPS_PREFIX}/lib")
set(CMAKE_SHARED_LINKER_FLAGS_INIT "-L${GCC_LIB} -L${LINUX_DEPS_PREFIX}/lib")
# Search only the cross-prefix for libraries and headers.
set(CMAKE_FIND_ROOT_PATH "${LINUX_DEPS_PREFIX}")
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
# Qt moc/rcc/uic are host tools that must run on Mac, not target Linux binaries.
set(QT_HOST_PATH "${MAC_ASP_DEPS}")
set(QT_MOC_EXECUTABLE "${MAC_ASP_DEPS}/bin/moc")
set(QT_RCC_EXECUTABLE "${MAC_ASP_DEPS}/bin/rcc")
set(QT_UIC_EXECUTABLE "${MAC_ASP_DEPS}/bin/uic")
================================================
FILE: conda/asp_2.7.0_linux_env.yaml
================================================
name: asp
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=0_gnu
- ale=0.7.2=py36h9e03d57_1
- armadillo=9.200.7=hf4e8f56_0
- arpack=3.7.0=hc6cf775_1
- blas=1.1=openblas
- boost=1.68.0=py36h8619c78_1001
- boost-cpp=1.68.0=h11c811c_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h516909a_2
- ca-certificates=2020.6.20=hecda079_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=h0948850_10
- certifi=2020.6.20=py36h9f0ad1d_0
- csm=v3.0.3.1=h6bb024c_1001
- cspice=66=h516909a_1009
- curl=7.64.0=h646f8bb_0
- dbus=1.13.0=h4e0c4b3_1000
- decorator=4.4.2=py_0
- eigen=3.3.7=hc9558a2_1001
- embree=2.16.0=0
- expat=2.2.9=he1b5a44_2
- ffmpeg=3.4.1=0
- fgr=e78ce15=hf484d3e_0
- flann=1.9.1=0
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=2.001=hab24e00_0
- font-ttf-source-code-pro=2.030=hab24e00_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.12.1=4
- fonts-conda-forge=1=0
- freetype=2.7=1
- gdal=2.0.2=hf484d3e_0
- geoid=1.0=1
- geos=3.7.3=he1b5a44_0
- geotiff=1.4.2=hb54a4aa_1
- gettext=0.19.8.1=hc5be6a0_1002
- gflags=2.2.2=he1b5a44_1002
- giflib=5.2.1=h516909a_2
- glib=2.51.4=0
- glog=0.4.0=h49b9bf7_3
- gmm=5.0=0
- gmp=6.2.0=he1b5a44_2
- gsl=2.6=h294904e_0
- gst-plugins-base=1.8.0=0
- gstreamer=1.8.0=2
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=hf484d3e_1000
- ilmbase=2.5.2=h8b12597_0
- imagemagick=6.8.6_10=hf484d3e_0
- isis=4.1.0=0
- isis-headers=4.1.0=1000
- jama=125=0
- jasper=1.900.1=4
- jpeg=9b=h024ee3a_2
- kakadu=1=0
- krb5=1.16.3=hc83ff2d_1000
- laszip=2.1.0=hf484d3e_1
- ld_impl_linux-64=2.34=h53a641e_5
- libblas=3.8.0=11_openblas
- libcblas=3.8.0=11_openblas
- libcurl=7.64.0=h01ee5af_0
- libedit=3.1.20191231=h46ee950_0
- libffi=3.2.1=he1b5a44_1007
- libgcc=7.2.0=h69d50b8_2
- libgcc-ng=9.2.0=h24d8f2e_2
- libgfortran=3.0.0=1
- libgfortran-ng=7.5.0
- libgomp=9.2.0=h24d8f2e_2
- libiconv=1.15=h516909a_1006
- liblapack=3.8.0=11_openblas
- liblas=1.8.1=hf484d3e_1000
- libnabo=2df86e0=hf484d3e_0
- libopenblas=0.3.6=h5a2b251_2
- libpng=1.6.37=hed695b0_1
- libpointmatcher=bcf4b04=hf484d3e_0
- libpq=10.6=h13b8bad_1000
- libprotobuf=3.9.1=h8b12597_0
- libssh2=1.8.0=h1ad7b7a_1003
- libstdcxx-ng=9.2.0=hdf63c60_2
- libtiff=4.0.9=he6b73bb_1
- libuuid=2.32.1=h14c3975_1000
- libwebp=0.5.2=7
- libxcb=1.13=h14c3975_1002
- libxml2=2.9.9=h13577e0_2
- llvm-openmp=8.0.1=hc9558a2_0
- mesalib=18.3.1=h590aaf7_0
- metis=5.1.0=he1b5a44_1005
- mysql=5.7.20=hf484d3e_1001
- mysql-connector-c=6.1.11=hab6429c_1002
- nanoflann=1.3.1=0
- ncurses=6.1=hf484d3e_1002
- networkx=2.4=py_1
- nlohmann_json=3.7.3=he1b5a44_1
- nn=1.86.0=h14c3975_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.2=he513fc3_0
- openjpeg=2.1.0=6
- openmp=8.0.1=0
- openssl=1.0.2u=h516909a_0
- parallel=20200522=0
- pbzip2=1.1.13=0
- pcl=1.9.1=h482114b_1002
- pcre=8.44=he1b5a44_0
- perl=5.26.2=h516909a_1006
- pip=20.1.1=py_1
- pixman=0.34.0=h14c3975_1003
- postgresql=10.6=h66cca7a_1000
- proj4=4.9.3=h516909a_9
- protobuf=3.9.1=py36he1b5a44_0
- pthread-stubs=0.4=h14c3975_1001
- pvl=0.3.0=py_1
- python=3.6.7=hd21baee_1002
- python-dateutil=2.8.1=py_0
- python_abi=3.6=1_cp36m
- pytz=2020.1=pyh9f0ad1d_0
- pyyaml=5.3.1=py36h8c4c3a4_0
- qhull=7.2.0=0
- qt=5.9.6=0
- qwt=6.1.3=0
- readline=7.0=hf8c457e_1001
- scipy=1.2.1=py36h09a28d5_1
- setuptools=47.3.1=py36h9f0ad1d_0
- six=1.15.0=pyh9f0ad1d_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h8b20d00_0
- stereo-pipeline=2.7.0=hf484d3e_20200727
- suitesparse=5.7.2=h717dc36_0
- superlu=5.2.1=hfe2efc7_1207
- tbb=2020.1=hc9558a2_0
- theia=f5d93f5=hf484d3e_1001
- tk=8.6.10=hed695b0_0
- tnt=126=0
- tzcode=2020a=h516909a_0
- usgscsm=a53f9cf=h6bb024c_0
- visionworkbench=2.7.0=hf484d3e_0
- wheel=0.34.2=py_1
- x264=20131218=0
- xerces-c=3.1.4=0
- xorg-fixesproto=5.0=h14c3975_1002
- xorg-inputproto=2.3.2=h14c3975_1002
- xorg-kbproto=1.0.7=h14c3975_1002
- xorg-libice=1.0.10=h516909a_0
- xorg-libsm=1.2.3=h84519dc_1000
- xorg-libx11=1.6.9=h516909a_0
- xorg-libxau=1.0.9=h14c3975_0
- xorg-libxdmcp=1.1.3=h516909a_0
- xorg-libxext=1.3.4=h516909a_0
- xorg-libxfixes=5.0.3=h516909a_1004
- xorg-libxi=1.7.10=h516909a_0
- xorg-libxrender=0.9.10=h516909a_1002
- xorg-renderproto=0.11.1=h14c3975_1002
- xorg-xextproto=7.3.0=h14c3975_1002
- xorg-xproto=7.0.31=h14c3975_1007
- xz=5.2.5=h516909a_0
- yaml=0.2.5=h516909a_0
- zlib=1.2.11=h516909a_1006
================================================
FILE: conda/asp_2.7.0_osx_env.yaml
================================================
name: asp
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- ale=0.7.2=py36h855b5bd_1
- armadillo=9.200.4=hcb88e47_1205
- arpack=3.6.3=h06d5271_1005
- blas=1.1=openblas
- boost=1.68.0=py36h9888f84_1001
- boost-cpp=1.68.0=h6f8c590_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h0b31af3_2
- ca-certificates=2020.6.20=hecda079_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=hb9d6bad_10
- certifi=2020.6.20=py36h9f0ad1d_0
- csm=v3.0.3.1=1001
- cspice=66=h0b31af3_1007
- curl=7.64.0=heae2a1f_0
- decorator=4.4.2=py_0
- eigen=3.3.7=ha1b3eb9_1001
- embree=2.16.0=h6834224_0
- expat=2.2.9=h4a8c4bd_2
- ffmpeg=3.4.1=0
- fgr=e78ce15=h0a44026_0
- flann=1.9.1=0
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=2.001=hab24e00_0
- font-ttf-source-code-pro=2.030=hab24e00_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.12.1=4
- fonts-conda-forge=1=0
- freetype=2.7=1
- gdal=2.0.2=h0a44026_0
- geoid=1.0=1
- geos=3.7.3=h4a8c4bd_0
- geotiff=1.4.2=hb54a4aa_1
- gettext=0.19.8.1=h46ab8bc_1002
- gflags=2.2.2=h4a8c4bd_1002
- giflib=5.2.1=h0b31af3_2
- glib=2.51.4=0
- glog=0.4.0=h700f914_3
- gmm=5.0=h6aef312_0
- gmp=6.2.0=h4a8c4bd_2
- gsl=2.6=ha2d443c_0
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=h0a44026_1000
- ilmbase=2.5.2=hd174df1_0
- imagemagick=6.8.6_10=h0a44026_0
- isis=4.1.0=0
- isis-headers=4.1.0=1000
- jama=125=0
- jasper=1.900.1=4
- jpeg=9b=he5867d9_2
- kakadu=1=0
- krb5=1.16.3=h24a3359_1000
- laszip=2.1.0=h0a44026_1
- libblas=3.8.0=11_openblas
- libcblas=3.8.0=11_openblas
- libcurl=7.64.0=h76de61e_0
- libcxx=10.0.0=1
- libedit=3.1.20191231=hed1e85f_0
- libffi=3.2.1=h4a8c4bd_1007
- libgcc=4.8.5=hdbeacc1_10
- libgfortran=3.0.1=0
- libiconv=1.15=h0b31af3_1006
- liblapack=3.8.0=11_openblas
- liblas=1.8.1=h0a44026_1000
- libnabo=2df86e0=h0a44026_0
- libopenblas=0.3.6=hdc02c5d_2
- libpng=1.6.37=hbbe82c9_1
- libpointmatcher=bcf4b04=h0a44026_0
- libpq=10.6=hbe1e24e_1000
- libprotobuf=3.9.1=hfbae3c0_0
- libssh2=1.8.0=hf30b1f0_1003
- libtiff=4.0.9=he6b73bb_1
- libwebp=0.5.2=7
- libxcb=1.13=h1de35cc_1002
- libxml2=2.9.9=hd80cff7_2
- llvm-openmp=10.0.1=h28b9765_0
- mesalib=18.0.0=hb6cfc13_1
- metis=5.1.0=h4a8c4bd_1005
- mysql=5.7.20=h0a44026_1001
- mysql-connector-c=6.1.11=had4e77e_1002
- nanoflann=1.3.1=0
- ncurses=6.1=h0a44026_1002
- networkx=2.4=py_1
- nlohmann_json=3.7.3=h4a8c4bd_1
- nn=1.86.0=h1de35cc_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.2=h7475705_0
- openjpeg=2.1.0=6
- openssl=1.0.2u=h0b31af3_0
- parallel=20200522=0
- pbzip2=1.1.13=h9d27c22_1
- pcl=1.9.1=hdd77166_1002
- pcre=8.44=h4a8c4bd_0
- perl=5.26.2=haec8ef5_1006
- pip=20.1.1=py36_1
- pixman=0.34.0=h1de35cc_1003
- postgresql=10.6=ha1bbaa7_1000
- proj4=4.9.3=h01d97ff_9
- protobuf=3.9.1=py36h6de7cb9_0
- pthread-stubs=0.4=h1de35cc_1001
- pvl=0.3.0=py_1
- python=3.6.7=h4a56312_1002
- python-dateutil=2.8.1=py_0
- python_abi=3.6=1_cp36m
- pytz=2020.1=pyh9f0ad1d_0
- pyyaml=5.3.1=py36h37b9a7d_0
- qhull=7.2.0=0
- qt=5.9.6=0
- qwt=6.1.3=0
- readline=7.0=hcfe32e1_1001
- scipy=1.2.1=py36hbd7caa9_1
- setuptools=47.3.1=py36_0
- six=1.15.0=pyh9f0ad1d_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h9721f7c_0
- stereo-pipeline=2.7.0=20200727
- suitesparse=5.7.2=h0e59142_0
- superlu=5.2.1=hbced767_1205
- tbb=2019.9=ha1b3eb9_1
- theia=f5d93f5=h0a44026_1001
- tk=8.6.10=hb0a8c7a_0
- tnt=126=0
- tzcode=2020a=h0b31af3_0
- usgscsm=a53f9cf=h04f5b5a_0
- visionworkbench=2.7.0=0
- wheel=0.34.2=py36_0
- x264=20131218=0
- xerces-c=3.1.4=h10f7eb2_0
- xorg-fixesproto=5.0=h1de35cc_1002
- xorg-inputproto=2.3.2=h1de35cc_1002
- xorg-kbproto=1.0.7=h1de35cc_1002
- xorg-libice=1.0.10=h01d97ff_0
- xorg-libsm=1.2.3=h01d97ff_1000
- xorg-libx11=1.6.9=h0b31af3_0
- xorg-libxau=1.0.9=h1de35cc_0
- xorg-libxdmcp=1.1.3=h01d97ff_0
- xorg-libxext=1.3.4=h01d97ff_0
- xorg-libxfixes=5.0.3=h01d97ff_1004
- xorg-libxi=1.7.10=h01d97ff_0
- xorg-xextproto=7.3.0=h1de35cc_1002
- xorg-xproto=7.0.31=h1de35cc_1007
- xz=5.2.5=h1de35cc_0
- yaml=0.2.5=h0b31af3_0
- zlib=1.2.11=h1de35cc_3
================================================
FILE: conda/asp_3.0.0_linux_env.yaml
================================================
name: asp
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=1_gnu
- _sysroot_linux-64_curr_repodata_hack=3=h5bd9786_12
- ale=0.8.5=py36h605e78d_3
- armadillo=9.900.5=h7c03176_0
- arpack=3.7.0=hc6cf775_2
- blas=1.1=openblas
- boost=1.68.0=py36h8619c78_1001
- boost-cpp=1.68.0=h11c811c_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h7f98852_4
- ca-certificates=2021.5.30=ha878542_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=h0948850_10
- certifi=2021.5.30=py36h5fab9bb_0
- chrpath=0.16=h7f98852_1002
- csm=3.0.3.3=hc9558a2_0
- cspice=66=h7f98852_1014
- curl=7.64.0=h646f8bb_0
- dbus=1.13.0=h4e0c4b3_1000
- decorator=5.0.9=pyhd8ed1ab_0
- eigen=3.3.7=hc9558a2_1001
- embree=2.16.0=0
- expat=2.4.1=h9c3ff4c_0
- ffmpeg=3.4.1=0
- fftw=3.3.8=nompi_hfc0cae8_1114
- fgr=isis5.0.1
- flann=1.9.1=0
- fontconfig=2.12.1=4
- freetype=2.7=1
- geoid=1.0=1
- geos=3.7.3=he1b5a44_0
- geotiff=1.4.3=hb6868eb_1001
- gettext=0.19.8.1=hf34092f_1004
- gflags=2.2.2=he1b5a44_1004
- giflib=5.2.1=h36c2ea0_2
- git=2.14.2=2
- glib=2.51.4=0
- glog=0.4.0=h49b9bf7_3
- gmm=5.0=0
- gmp=6.2.1=h58526e2_0
- gsl=2.7=he838d99_0
- gst-plugins-base=1.8.0=0
- gstreamer=1.8.0=2
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=hf484d3e_1000
- ilmbase=2.5.5=h780b84a_0
- imagemagick=isis5.0.1
- inja=3.3.0=h9c3ff4c_0
- isis=5.0.1=0
- jama=125=0
- jasper=1.900.1=h07fcdf6_1006
- jpeg=9d=h36c2ea0_0
- kakadu=1=0
- kernel-headers_linux-64=3.10.0=h4a8ded7_12
- krb5=1.16.3=hc83ff2d_1000
- laszip=2.1.0=hf484d3e_1
- ld_impl_linux-64=2.35.1=h7274673_9
- libblas=3.8.0=17_openblas
- libcblas=3.8.0=17_openblas
- libcurl=7.64.0=h01ee5af_0
- libedit=3.1.20191231=he28a2e2_2
- libelas=isis5.0.1
- libffi=3.2.1=he1b5a44_1007
- libgcc=7.2.0=h69d50b8_2
- libgcc-ng=11.1.0=hc902ee8_8
- libgdal=2.4.1_isis5.0.1=h3fd9d12_0
- libgfortran=3.0.0=1
- libgfortran-ng=7.5.0=h14aa051_19
- libgfortran4=7.5.0=h14aa051_19
- libglvnd-cos7-x86_64=1.0.1=h9b0a68f_1105
- libgomp=11.1.0=hc902ee8_8
- libiconv=1.15=h516909a_1006
- liblapack=3.8.0=17_openblas
- liblas=isis5.0.1
- libnabo=isis5.0.1
- libopenblas=0.3.10=h5a2b251_0
- libpng=1.6.37=h21135ba_2
- libpointmatcher=isis5.0.1
- libpq=10.6=h13b8bad_1000
- libprotobuf=3.9.2=h8b12597_0
- libssh2=1.8.0=h1ad7b7a_1003
- libstdcxx-ng=11.1.0=h56837e0_8
- libtiff=4.0.10=hc3755c2_1005
- libuuid=2.32.1=h7f98852_1000
- libwebp=0.5.2=7
- libxcb=1.13=h7f98852_1003
- libxml2=2.9.9=h13577e0_2
- llvm-openmp=8.0.1=hc9558a2_0
- lz4-c=1.9.3=h9c3ff4c_1
- mesalib=18.3.1=h590aaf7_0
- metis=5.1.0=h58526e2_1006
- mpfr=4.1.0=h9202a9a_1
- mysql=5.7.20=hf484d3e_1001
- mysql-connector-c=6.1.11=hab6429c_1002
- nanoflann=1.3.2=ha770c72_0
- ncurses=6.2=he6710b0_1
- networkx=2.5=py_0
- nlohmann_json=3.10.2=h9c3ff4c_0
- nn=1.86.0=h14c3975_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.5=hf817b99_0
- openjpeg=2.1.0=6
- openmp=8.0.1=0
- openssl=1.0.2u=h516909a_0
- parallel=20210822=ha770c72_0
- pbzip2=1.1.13=0
- pcl=1.9.1=h482114b_1002
- pcre=8.45=h9c3ff4c_0
- perl=5.32.1=0_h7f98852_perl5
- pip=21.0.1=py36h06a4308_0
- pixman=0.34.0=h14c3975_1003
- postgresql=10.6=h66cca7a_1000
- proj4=5.2.0=he1b5a44_1006
- protobuf=3.9.2=py36he1b5a44_1
- pthread-stubs=0.4=h36c2ea0_1001
- pvl=1.2.1=pyhd8ed1ab_0
- python=3.6.7=hd21baee_1002
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.6=2_cp36m
- pytz=2021.1=pyhd8ed1ab_0
- pyyaml=5.4.1=py36h8f6f2f9_1
- qhull=7.2.0=0
- qt=5.9.6=7
- qwt=6.1.3=0
- readline=7.0=hf8c457e_1001
- s2p-subset=isis5.0.1
- scipy=1.2.1=py36h09a28d5_1
- setuptools=52.0.0=py36h06a4308_0
- six=1.16.0=pyh6c4a22f_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h8b20d00_0
- stereo-pipeline=3.0.0=h3fd9d12_0
- suitesparse=5.10.1=hd8046ac_0
- superlu=5.2.2=hfe2efc7_0
- sysroot_linux-64=2.17=h4a8ded7_12
- tbb=2020.2=h4bd325d_4
- theia=isis5.0.1
- tk=8.6.10=hbc83047_0
- tnt=126=0
- tzcode=2021a=h7f98852_2
- usgscsm=1.5.2
- visionworkbench=3.0.0=h3fd9d12_0
- wheel=0.37.0=pyhd3eb1b0_0
- x264=20131218=0
- xerces-c=3.1.4=0
- xorg-fixesproto=5.0=h7f98852_1002
- xorg-inputproto=2.3.2=h7f98852_1002
- xorg-kbproto=1.0.7=h7f98852_1002
- xorg-libice=1.0.10=h7f98852_0
- xorg-libsm=1.2.3=hd9c2040_1000
- xorg-libx11=1.7.2=h7f98852_0
- xorg-libxau=1.0.9=h7f98852_0
- xorg-libxdmcp=1.1.3=h7f98852_0
- xorg-libxext=1.3.4=h7f98852_1
- xorg-libxfixes=5.0.3=h7f98852_1004
- xorg-libxi=1.7.10=h7f98852_0
- xorg-libxrender=0.9.10=h7f98852_1003
- xorg-renderproto=0.11.1=h7f98852_1002
- xorg-xextproto=7.3.0=h7f98852_1002
- xorg-xproto=7.0.31=h7f98852_1007
- xz=5.2.5=h7b6447c_0
- yaml=0.2.5=h516909a_0
- zlib=1.2.11=h7b6447c_3
- zstd=1.4.9=ha95c52a_0
================================================
FILE: conda/asp_3.0.0_osx_env.yaml
================================================
name: asp
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- ale=0.8.5=py36hc61eee1_3
- armadillo=9.200.4=hcb88e47_1205
- arpack=3.6.3=h06d5271_1005
- blas=1.1=openblas
- boost=1.68.0=py36h9888f84_1001
- boost-cpp=1.68.0=h6f8c590_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h0d85af4_4
- ca-certificates=2021.5.30=h033912b_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=hb9d6bad_10
- certifi=2021.5.30=py36h79c6626_0
- cmake=3.3.1=0
- csm=3.0.3.3=0
- cspice=66=h0d85af4_1014
- curl=7.64.0=heae2a1f_0
- decorator=5.0.9=pyhd8ed1ab_0
- eigen=3.3.7=h04f5b5a_0
- embree=2.16.0=h6834224_0
- expat=2.4.1=he49afe7_0
- ffmpeg=3.4.1=0
- fftw=3.3.8=nompi_h5c49c53_1109
- fgr=isis5.0.1
- flann=1.9.1=0
- fontconfig=2.12.1=4
- freetype=2.7=1
- geoid=1.0=1
- geos=3.7.3=h4a8c4bd_0
- geotiff=1.4.3=h113155d_1001
- gettext=0.19.8.1=h46ab8bc_1002
- gflags=2.2.2=hb1e8313_1004
- giflib=5.2.1=hbcb3906_2
- git=2.11.1=0
- glib=2.51.4=0
- glog=0.4.0=hb7f4fc5_3
- gmm=5.0=h6aef312_0
- gmp=6.2.1=h2e338ed_0
- gsl=2.7=h93259b0_0
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=h0a44026_1000
- ilmbase=2.5.5=hfab91a5_0
- imagemagick=isis5.0.1
- inja=3.2.0=h1c7c35f_0
- isis=5.0.1=0
- jama=125=0
- jasper=1.900.1=h636a363_1006
- jpeg=9d=hbcb3906_0
- kakadu=1=0
- krb5=1.16.3=h24a3359_1000
- laszip=2.1.0=h0a44026_1
- libblas=3.8.0=17_openblas
- libcblas=3.8.0=17_openblas
- libcurl=7.64.0=h76de61e_0
- libcxx=12.0.1=habf9029_0
- libedit=3.1.20191231=h0678c8f_2
- libelas=isis5.0.1
- libffi=3.2.1=hb1e8313_1007
- libgcc=4.8.5=hdbeacc1_10
- libgdal=2.4.1_isis5.0.1
- libgfortran=3.0.1=0
- libiconv=1.15=h0b31af3_1006
- liblapack=3.8.0=17_openblas
- liblas=isis5.0.1
- libnabo=isis5.0.1
- libopenblas=0.3.10=h0794777_0
- libpng=1.6.37=h7cec526_2
- libpointmatcher=isis5.0.1
- libpq=10.6=hbe1e24e_1000
- libprotobuf=3.9.2=hfbae3c0_0
- libssh2=1.8.0=hf30b1f0_1003
- libtiff=4.0.10=ha78913b_1005
- libwebp=0.5.2=7
- libxcb=1.13=h35c211d_1003
- libxml2=2.9.9=hd80cff7_2
- llvm-openmp=12.0.1=hda6cdc1_0
- lz4-c=1.9.3=h046ec9c_0
- mesalib=18.0.0=hb6cfc13_1
- metis=5.1.0=h2e338ed_1006
- mpfr=4.0.2=h72d8aaf_1
- mysql=5.7.20=h0a44026_1001
- mysql-connector-c=6.1.11=had4e77e_1002
- nanoflann=1.3.2=h694c41f_0
- ncurses=6.2=h2e338ed_4
- networkx=2.5=py_0
- nlohmann_json=3.9.1=he49afe7_1
- nn=1.86.0=h1de35cc_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.5=h7fa7ffa_0
- openjpeg=2.1.0=6
- openssl=1.0.2u=h0b31af3_0
- parallel=20210622=h694c41f_0
- pbzip2=1.1.13=h9d27c22_1
- pcl=1.9.1=hdd77166_1002
- pcre=8.45=he49afe7_0
- perl=5.32.1=0_h0d85af4_perl5
- pip=21.1.3=pyhd8ed1ab_0
- pixman=0.34.0=h1de35cc_1003
- postgresql=10.6=ha1bbaa7_1000
- proj4=5.2.0=h6de7cb9_1006
- protobuf=3.9.2=py36h6de7cb9_1
- pthread-stubs=0.4=hc929b4f_1001
- pvl=1.2.1=pyhd8ed1ab_0
- python=3.6.7=h4a56312_1002
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.6=2_cp36m
- pytz=2021.1=pyhd8ed1ab_0
- pyyaml=5.4.1=py36h20b66c6_0
- qhull=7.2.0=0
- qt=5.9.6=7
- qwt=6.1.3=0
- readline=7.0=hcfe32e1_1001
- s2p-subset=isis5.0.1
- scipy=1.2.1=py36hbd7caa9_1
- setuptools=49.6.0=py36h79c6626_3
- six=1.16.0=pyh6c4a22f_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h9721f7c_0
- stereo-pipeline=3.0.0=0
- suitesparse=5.10.1=h68a9093_0
- superlu=5.2.1=hbced767_1205
- tbb=2020.2=h940c156_4
- theia=isis5.0.1
- tk=8.6.10=h0419947_1
- tnt=126=0
- tzcode=2021a=h0d85af4_2
- usgscsm=1.5.2
- visionworkbench=3.0.0=0
- wheel=0.36.2=pyhd3deb0d_0
- x264=20131218=0
- xerces-c=3.1.4=h10f7eb2_0
- xorg-fixesproto=5.0=h0d85af4_1002
- xorg-inputproto=2.3.2=h35c211d_1002
- xorg-kbproto=1.0.7=h35c211d_1002
- xorg-libice=1.0.10=h0d85af4_0
- xorg-libsm=1.2.3=h0d85af4_1000
- xorg-libx11=1.7.2=h0d85af4_0
- xorg-libxau=1.0.9=h35c211d_0
- xorg-libxdmcp=1.1.3=h35c211d_0
- xorg-libxext=1.3.4=h0d85af4_1
- xorg-libxfixes=5.0.3=h0d85af4_1004
- xorg-libxi=1.7.10=h0d85af4_0
- xorg-xextproto=7.3.0=h35c211d_1002
- xorg-xproto=7.0.31=h35c211d_1007
- xz=5.2.5=haf1e3a3_1
- yaml=0.2.5=haf1e3a3_0
- zlib=1.2.11=h7795811_1010
- zstd=1.4.9=h582d3a0_0
================================================
FILE: conda/asp_3.1.0_linux_env.yaml
================================================
name: asp
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=1_gnu
- _sysroot_linux-64_curr_repodata_hack=3=h5bd9786_13
- ale=0.8.5=py36h605e78d_3
- armadillo=10.8.2=h7c03176_0
- arpack=3.7.0=hc6cf775_2
- binutils_impl_linux-64=2.36.1=h193b22a_2
- binutils_linux-64=2.36=hf3e587d_9
- blas=1.1=openblas
- boost=1.68.0=py36h8619c78_1001
- boost-cpp=1.68.0=h11c811c_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h7f98852_4
- ca-certificates=2021.10.8=ha878542_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=hf302a74_15
- chrpath=0.16=h7f98852_1002
- cmake=3.15.5=hf94ab9c_0
- csm=3.0.3.3=hc9558a2_0
- cspice=66=h7f98852_1015
- curl=7.64.0=h646f8bb_0
- dbus=1.13.0=h4e0c4b3_1000
- decorator=5.1.1=pyhd8ed1ab_0
- eigen=3.4.0=h4bd325d_0
- embree=2.16.0=0
- expat=2.4.8=h27087fc_0
- ffmpeg=3.4.1=0
- fftw=3.3.8=nompi_hfc0cae8_1114
- fgr=isis6=h3fd9d12_0
- flann=1.9.1=0
- fontconfig=2.12.1=4
- freetype=2.7=1
- gcc_impl_linux-64=11.2.0=h82a94d6_16
- gcc_linux-64=11.2.0=h39a9532_9
- geoid=1.0=1
- geos=3.7.3=he1b5a44_0
- geotiff=1.4.3=hb6868eb_1001
- gettext=0.19.8.1=hf34092f_1004
- gflags=2.2.2=he1b5a44_1004
- giflib=5.2.1=h36c2ea0_2
- glib=2.51.4=0
- glog=0.6.0=h6f12383_0
- gmm=5.0=0
- gmp=6.2.1=h58526e2_0
- gsl=2.7=he838d99_0
- gst-plugins-base=1.8.0=0
- gstreamer=1.8.0=2
- gxx_impl_linux-64=11.2.0=h82a94d6_16
- gxx_linux-64=11.2.0=hacbe6df_9
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=hf484d3e_1000
- ilmbase=2.5.5=h780b84a_0
- imagemagick=isis6=h3fd9d12_0
- imath=3.1.5=h6239696_0
- inja=3.3.0=h9c3ff4c_0
- isis=6.0.0=0
- jama=125=0
- jasper=1.900.1=h07fcdf6_1006
- jpeg=9e=h166bdaf_1
- kakadu=1=0
- kernel-headers_linux-64=3.10.0=h4a8ded7_13
- krb5=1.16.3=hc83ff2d_1000
- laszip=2.1.0=hf484d3e_1
- ld_impl_linux-64=2.36.1=hea4e1c9_2
- libblas=3.9.0=13_linux64_openblas
- libcblas=3.9.0=13_linux64_openblas
- libcurl=7.64.0=h01ee5af_0
- libedit=3.1.20191231=he28a2e2_2
- libelas=isis6=h3fd9d12_0
- libffi=3.2.1=he1b5a44_1007
- libgcc=7.2.0=h69d50b8_2
- libgcc-devel_linux-64=11.2.0=h0952999_16
- libgcc-ng=11.2.0=h1d223b6_15
- libgdal=2.4.1_isis6=h3fd9d12_0
- libgfortran=3.0.0=1
- libgfortran-ng=7.5.0=h14aa051_20
- libgfortran4=7.5.0=h14aa051_20
- libglvnd-cos7-x86_64=1.0.1=h9b0a68f_1105
- libgomp=11.2.0=h1d223b6_15
- libiconv=1.15=h516909a_1006
- liblapack=3.9.0=13_linux64_openblas
- liblas=isis6=h3fd9d12_1000
- libnabo=isis6=h3fd9d12_0
- libnsl=2.0.0=h7f98852_0
- libopenblas=0.3.18=hf726d26_0
- libpng=1.6.37=h21135ba_2
- libpointmatcher=isis6=h2bc3f7f_0
- libpq=10.6=h13b8bad_1000
- libprotobuf=3.9.2=h8b12597_0
- libsanitizer=11.2.0=he4da1e4_16
- libssh2=1.8.0=h1ad7b7a_1003
- libstdcxx-devel_linux-64=11.2.0=h0952999_16
- libstdcxx-ng=11.2.0=he4da1e4_15
- libtiff=4.0.10=hc3755c2_1005
- libuuid=2.32.1=h7f98852_1000
- libuv=1.43.0=h7f98852_0
- libwebp=0.5.2=7
- libxcb=1.13=h7f98852_1004
- libxml2=2.9.9=h13577e0_2
- libzlib=1.2.11=h166bdaf_1014
- llvm-openmp=8.0.1=hc9558a2_0
- lz4-c=1.9.3=h9c3ff4c_1
- mesalib=18.3.1=h590aaf7_0
- metis=5.1.0=h58526e2_1006
- mpfr=4.1.0=h9202a9a_1
- mysql=5.7.20=hf484d3e_1001
- mysql-connector-c=6.1.11=hab6429c_1002
- nanoflann=1.4.2=ha770c72_0
- ncurses=6.3=h27087fc_1
- networkx=2.5=py_0
- nlohmann_json=3.10.5=h9c3ff4c_0
- nn=1.86.0=h14c3975_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.5=hf817b99_0
- openjpeg=2.3.0=hf38bd82_1003
- openmp=8.0.1=0
- openssl=1.0.2u=h516909a_0
- parallel=20220222=ha770c72_0
- pbzip2=1.1.13=0
- pcl=1.9.1=h482114b_1002
- pcre=8.45=h9c3ff4c_0
- perl=5.32.1=2_h7f98852_perl5
- pip=21.3.1=pyhd8ed1ab_0
- pixman=0.34.0=h14c3975_1003
- postgresql=10.6=h66cca7a_1000
- proj4=5.2.0=he1b5a44_1006
- protobuf=3.9.2=py36he1b5a44_1
- pthread-stubs=0.4=h36c2ea0_1001
- pvl=1.3.1=pyhd8ed1ab_0
- python=3.6.7=hd21baee_1002
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.6=2_cp36m
- pytz=2022.1=pyhd8ed1ab_0
- pyyaml=5.4.1=py36h8f6f2f9_1
- qhull=7.2.0=0
- qt=5.9.6=7
- qwt=6.1.3=0
- readline=7.0=hf8c457e_1001
- rhash=1.4.1=h7f98852_0
- s2p-subset=isis6=h3fd9d12_0
- scipy=1.2.1=py36h09a28d5_1
- setuptools=58.0.4=py36h5fab9bb_2
- six=1.16.0=pyh6c4a22f_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h8b20d00_0
- stereo-pipeline=3.1.0=h3fd9d12_0
- suitesparse=5.10.1=h9e50725_1
- superlu=5.2.2=hfe2efc7_0
- sysroot_linux-64=2.17=h4a8ded7_13
- tbb=2021.5.0=h924138e_1
- theia=isis6=h3fd9d12_1001
- tk=8.6.12=h27826a3_0
- tnt=126=0
- tzcode=2022a=h166bdaf_0
- usgscsm=1.6.0_asp3.1.0=h2bc3f7f_0
- visionworkbench=3.1.0=h3fd9d12_0
- wheel=0.37.1=pyhd8ed1ab_0
- x264=20131218=0
- xerces-c=3.1.4=0
- xorg-fixesproto=5.0=h7f98852_1002
- xorg-inputproto=2.3.2=h7f98852_1002
- xorg-kbproto=1.0.7=h7f98852_1002
- xorg-libice=1.0.10=h7f98852_0
- xorg-libsm=1.2.3=hd9c2040_1000
- xorg-libx11=1.7.2=h7f98852_0
- xorg-libxau=1.0.9=h7f98852_0
- xorg-libxdmcp=1.1.3=h7f98852_0
- xorg-libxext=1.3.4=h7f98852_1
- xorg-libxfixes=5.0.3=h7f98852_1004
- xorg-libxi=1.7.10=h7f98852_0
- xorg-libxrender=0.9.10=h7f98852_1003
- xorg-renderproto=0.11.1=h7f98852_1002
- xorg-xextproto=7.3.0=h7f98852_1002
- xorg-xproto=7.0.31=h7f98852_1007
- xz=5.2.5=h516909a_1
- yaml=0.2.5=h7f98852_2
- zlib=1.2.11=h166bdaf_1014
- zstd=1.4.9=ha95c52a_0
================================================
FILE: conda/asp_3.1.0_osx_env.yaml
================================================
name: asp
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- ale=0.8.5=py36hc61eee1_3
- armadillo=9.200.4=hcb88e47_1205
- arpack=3.6.3=h06d5271_1005
- blas=1.1=openblas
- boost=1.68.0=py36h9888f84_1001
- boost-cpp=1.68.0=h6f8c590_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h0d85af4_4
- ca-certificates=2021.10.8=h033912b_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=h636452b_15
- certifi=2021.5.30=py36h79c6626_0
- cmake=3.15.5=h6c18c4b_0
- csm=3.0.3.3=0
- cspice=66=h0d85af4_1015
- curl=7.64.0=heae2a1f_0
- decorator=5.1.1=pyhd8ed1ab_0
- eigen=3.4.0=h940c156_0
- embree=2.16.0=h6834224_0
- expat=2.4.8=h96cf925_0
- ffmpeg=3.4.1=0
- fftw=3.3.8=nompi_h5c49c53_1109
- fgr=isis6=h01edc0c_0
- flann=1.9.1=0
- fontconfig=2.12.1=4
- freetype=2.7=1
- geoid=1.0=1
- geos=3.7.3=h4a8c4bd_0
- geotiff=1.4.3=h113155d_1001
- gettext=0.19.8.1=h46ab8bc_1002
- gflags=2.2.2=hb1e8313_1004
- giflib=5.2.1=hbcb3906_2
- glib=2.51.4=0
- glog=0.6.0=h8ac2a54_0
- gmm=5.0=h6aef312_0
- gmp=6.2.1=h2e338ed_0
- gsl=2.7=h93259b0_0
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=h0a44026_1000
- ilmbase=2.5.5=hfab91a5_0
- imagemagick=isis6=h01edc0c_0
- inja=3.3.0=he49afe7_0
- isis=6.0.0=0
- jama=125=0
- jasper=1.900.1=h636a363_1006
- jpeg=9e=h5eb16cf_1
- kakadu=1=0
- krb5=1.16.3=h24a3359_1000
- laszip=2.1.0=h0a44026_1
- libblas=3.9.0=13_osx64_openblas
- libcblas=3.9.0=13_osx64_openblas
- libcurl=7.64.0=h76de61e_0
- libcxx=14.0.3=hc203e6f_0
- libedit=3.1.20191231=h0678c8f_2
- libelas=isis6=0
- libffi=3.2.1=hb1e8313_1007
- libgcc=4.8.5=hdbeacc1_10
- libgdal=2.4.1_isis6=h01edc0c_0
- libgfortran=3.0.1=0
- libiconv=1.15=h0b31af3_1006
- liblapack=3.9.0=13_osx64_openblas
- liblas=isis6=h01edc0c_1000
- libnabo=isis6=h01edc0c_0
- libopenblas=0.3.18=h9a5756b_0
- libpng=1.6.37=h7cec526_2
- libpointmatcher=isis6=ha5a8b8e_0
- libpq=10.6=hbe1e24e_1000
- libprotobuf=3.9.2=hfbae3c0_0
- libssh2=1.8.0=hf30b1f0_1003
- libtiff=4.0.10=ha78913b_1005
- libuv=1.43.0=h0d85af4_0
- libwebp=0.5.2=7
- libxcb=1.13=h0d85af4_1004
- libxml2=2.9.9=hd80cff7_2
- libzlib=1.2.11=h6c3fc93_1014
- llvm-openmp=14.0.3=ha654fa7_0
- lz4-c=1.9.3=he49afe7_1
- mesalib=21.2.5=h2df1e00_3
- metis=5.1.0=h2e338ed_1006
- mpfr=4.1.0=h0f52abe_1
- mysql=5.7.20=h0a44026_1001
- mysql-connector-c=6.1.11=had4e77e_1002
- nanoflann=1.4.2=h694c41f_0
- ncurses=6.3=h96cf925_1
- networkx=2.5=py_0
- nlohmann_json=3.10.5=he49afe7_0
- nn=1.86.0=h1de35cc_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.5=h7fa7ffa_0
- openjpeg=2.3.0=h3bf0609_1003
- openssl=1.0.2u=h0b31af3_0
- parallel=20220222=h694c41f_0
- pbzip2=1.1.13=h9d27c22_1
- pcl=1.9.1=hdd77166_1002
- pcre=8.45=he49afe7_0
- perl=5.32.1=2_h0d85af4_perl5
- pip=21.3.1=pyhd8ed1ab_0
- pixman=0.34.0=h1de35cc_1003
- postgresql=10.6=ha1bbaa7_1000
- proj4=5.2.0=h6de7cb9_1006
- protobuf=3.9.2=py36h6de7cb9_1
- pthread-stubs=0.4=hc929b4f_1001
- pvl=1.3.1=pyhd8ed1ab_0
- python=3.6.7=h4a56312_1002
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.6=2_cp36m
- pytz=2022.1=pyhd8ed1ab_0
- pyyaml=5.4.1=py36hfa26744_1
- qhull=7.2.0=0
- qt=5.9.6=7
- qwt=6.1.3=0
- readline=7.0=hcfe32e1_1001
- rhash=1.4.1=h0d85af4_0
- s2p-subset=isis6=h01edc0c_0
- scipy=1.2.1=py36hbd7caa9_1
- setuptools=58.0.4=py36h79c6626_2
- six=1.16.0=pyh6c4a22f_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h9721f7c_0
- stereo-pipeline=3.1.0=0
- suitesparse=5.10.1=h7aff33d_1
- superlu=5.2.1=hbced767_1205
- tbb=2021.5.0=hbb4e6a2_1
- theia=isis6=h01edc0c_1001
- tk=8.6.12=h5dbffcc_0
- tnt=126=0
- tzcode=2022a=h5eb16cf_0
- usgscsm=1.6.0_asp3.1.0=ha5a8b8e_0
- visionworkbench=3.1.0=0
- wheel=0.37.1=pyhd8ed1ab_0
- x264=20131218=0
- xerces-c=3.1.4=h10f7eb2_0
- xorg-damageproto=1.2.1=h0d85af4_1002
- xorg-fixesproto=5.0=h0d85af4_1002
- xorg-glproto=1.4.17=h0d85af4_1002
- xorg-inputproto=2.3.2=h35c211d_1002
- xorg-kbproto=1.0.7=h35c211d_1002
- xorg-libice=1.0.10=h0d85af4_0
- xorg-libsm=1.2.3=h0d85af4_1000
- xorg-libx11=1.7.2=h0d85af4_0
- xorg-libxau=1.0.9=h35c211d_0
- xorg-libxdamage=1.1.5=h0d85af4_1
- xorg-libxdmcp=1.1.3=h35c211d_0
- xorg-libxext=1.3.4=h0d85af4_1
- xorg-libxfixes=5.0.3=h0d85af4_1004
- xorg-libxi=1.7.10=h0d85af4_0
- xorg-libxrandr=1.5.2=h0d85af4_1
- xorg-libxrender=0.9.10=h0d85af4_1003
- xorg-randrproto=1.5.0=h0d85af4_1001
- xorg-renderproto=0.11.1=h0d85af4_1002
- xorg-util-macros=1.19.3=h35c211d_0
- xorg-xextproto=7.3.0=h35c211d_1002
- xorg-xf86vidmodeproto=2.3.1=h0d85af4_1002
- xorg-xproto=7.0.31=h35c211d_1007
- xz=5.2.5=haf1e3a3_1
- yaml=0.2.5=h0d85af4_2
- zlib=1.2.11=h6c3fc93_1014
- zstd=1.4.9=h582d3a0_0
================================================
FILE: conda/asp_3.2.0_linux_env.yaml
================================================
name: asp
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_kmp_llvm
- _sysroot_linux-64_curr_repodata_hack=3=h5bd9786_13
- ale=0.8.8=py39hf939315_1
- alsa-lib=1.2.6.1=h7f98852_0
- aom=3.4.0=h27087fc_1
- armadillo=11.4.2=h7209761_0
- arpack=3.7.0=hdefa2d7_2
- attr=2.5.1=h166bdaf_1
- binutils_impl_linux-64=2.39=he00db2b_1
- binutils_linux-64=2.39=h5fc0e48_11
- blas=2.116=openblas
- blas-devel=3.9.0=16_linux64_openblas
- boost=1.72.0=py39ha90915f_1
- boost-cpp=1.72.0=he72f1d9_7
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h7f98852_4
- c-ares=1.18.1=h7f98852_0
- ca-certificates=2022.12.7=ha878542_0
- cairo=1.16.0=ha61ee94_1014
- ceres-solver=1.14.0=hf302a74_15
- chrpath=0.16=h7f98852_1002
- cmake=3.15.5=hf94ab9c_0
- csm=3.0.3.3=hc9558a2_0
- cspice=67=h166bdaf_4
- curl=7.86.0=h7bff187_1
- cyrus-sasl=2.1.27=h230043b_5
- dbus=1.13.6=h5008d03_3
- eigen=3.4.0=h4bd325d_0
- elfutils=0.186=he364ef2_0
- embree=2.16.0=0
- expat=2.5.0=h27087fc_0
- ffmpeg=4.4.2=gpl_hfe78399_107
- fftw=3.3.10=nompi_hf0379b8_106
- fgr=isis7=h3fd9d12_0
- flann=1.9.1=he05ef13_1011
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.14.1=hc2a2eb6_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freeglut=3.2.2=h9c3ff4c_1
- freetype=2.12.1=hca18f0e_1
- gcc_impl_linux-64=12.2.0=hcc96c02_19
- gcc_linux-64=12.2.0=h4798a0e_11
- geoid=1.0_isis7=1
- geos=3.7.3=he1b5a44_0
- geotiff=1.7.1=ha76d385_4
- gettext=0.21.1=h27087fc_0
- gflags=2.2.2=he1b5a44_1004
- gfortran_impl_linux-64=12.2.0=h55be85b_19
- gfortran_linux-64=12.2.0=h307d370_11
- giflib=5.2.1=h36c2ea0_2
- glib=2.74.1=h6239696_1
- glib-tools=2.74.1=h6239696_1
- glog=0.6.0=h6f12383_0
- gmp=6.2.1=h58526e2_0
- gnutls=3.7.8=hf3e180e_0
- graphite2=1.3.13=h58526e2_1001
- gsl=2.7=he838d99_0
- gst-plugins-base=1.20.3=hf6a322e_0
- gstreamer=1.20.3=hd4edc92_2
- gxx_impl_linux-64=12.2.0=hcc96c02_19
- gxx_linux-64=12.2.0=hb41e900_11
- harfbuzz=5.3.0=h418a68e_0
- hdf5=1.12.2=nompi_h2386368_100
- htdp=1.0_isis7=1
- icu=70.1=h27087fc_0
- ilmbase=2.5.5=h780b84a_0
- inja=3.3.0=h9c3ff4c_0
- isis=7.1.0=0
- jack=1.9.18=h8c3723f_1002
- jama=125=0
- jasper=2.0.33=ha77e612_0
- jemalloc=5.2.1=h9c3ff4c_6
- jpeg=9e=h166bdaf_2
- kakadu=1=0
- kernel-headers_linux-64=3.10.0=h4a8ded7_13
- keyutils=1.6.1=h166bdaf_0
- krb5=1.19.3=h3790be6_0
- lame=3.100=h166bdaf_1003
- laszip=2.1.0_isis7=h3fd9d12_1
- ld_impl_linux-64=2.39=hcc3a1bd_1
- lerc=4.0.0=h27087fc_0
- libarchive=3.5.2=hb890918_3
- libblas=3.9.0=16_linux64_openblas
- libcap=2.64=ha37c62d_0
- libcblas=3.9.0=16_linux64_openblas
- libclang=14.0.6=default_h2e3cab8_0
- libclang13=14.0.6=default_h3a83d3e_0
- libcups=2.3.3=h3e49a29_2
- libcurl=7.86.0=h7bff187_1
- libcxx=14.0.6=hf52228f_0
- libcxxabi=14.0.6=ha770c72_0
- libdb=6.2.32=h9c3ff4c_0
- libdeflate=1.14=h166bdaf_0
- libdrm=2.4.114=h166bdaf_0
- libedit=3.1.20191231=he28a2e2_2
- libelas=isis7=h3fd9d12_0
- libev=4.33=h516909a_1
- libevent=2.1.10=h9b69904_4
- libffi=3.4.2=h7f98852_5
- libflac=1.3.4=h27087fc_0
- libgcc-devel_linux-64=12.2.0=h3b97bd3_19
- libgcc-ng=12.2.0=h65d4601_19
- libgdal=3.5_isis7=h3fd9d12_0
- libgfortran-ng=12.2.0=h69a702a_19
- libgfortran5=12.2.0=h337968e_19
- libglib=2.74.1=h606061b_1
- libglu=9.0.0=he1b5a44_1001
- libglvnd-cos7-x86_64=1.0.1=h9b0a68f_1105
- libgomp=12.2.0=h65d4601_19
- libhwloc=2.8.0=h32351e8_1
- libiconv=1.17=h166bdaf_0
- libidn2=2.3.4=h166bdaf_0
- libjemalloc=5.2.1=h9c3ff4c_6
- liblapack=3.9.0=16_linux64_openblas
- liblapacke=3.9.0=16_linux64_openblas
- liblas=1.8.2_isis7=h3fd9d12_0
- libllvm14=14.0.6=he0ac6c6_1
- libmicrohttpd=0.9.75=h2603550_1
- libnabo=isis7=h3fd9d12_0
- libnghttp2=1.47.0=hdcd2b5c_1
- libnsl=2.0.0=h7f98852_0
- libntlm=1.4=h7f98852_1002
- libogg=1.3.4=h7f98852_1
- libopenblas=0.3.21=pthreads_h78a6416_3
- libopencv=4.6.0=py39h04bf7ee_4
- libopus=1.3.1=h7f98852_1
- libpciaccess=0.17=h166bdaf_0
- libpng=1.6.39=h753d276_0
- libpointmatcher=isis7=h2bc3f7f_0
- libpq=14.5=h72a31a5_3
- libprotobuf=3.21.11=h3eb15da_0
- libsanitizer=12.2.0=h46fd767_19
- libsndfile=1.0.31=h9c3ff4c_1
- libsqlite=3.40.0=h753d276_0
- libssh2=1.10.0=haa6b8db_3
- libstdcxx-devel_linux-64=12.2.0=h3b97bd3_19
- libstdcxx-ng=12.2.0=h46fd767_19
- libtasn1=4.19.0=h166bdaf_0
- libtiff=4.4.0=h55922b4_4
- libtool=2.4.6=h9c3ff4c_1008
- libudev1=252=h166bdaf_0
- libunistring=0.9.10=h7f98852_0
- libuuid=2.32.1=h7f98852_1000
- libuv=1.44.2=h166bdaf_0
- libva=2.16.0=h166bdaf_0
- libvorbis=1.3.7=h9c3ff4c_0
- libvpx=1.11.0=h9c3ff4c_3
- libwebp=1.2.4=h522a892_0
- libwebp-base=1.2.4=h166bdaf_0
- libxcb=1.13=h7f98852_1004
- libxkbcommon=1.0.3=he3ba5ed_0
- libxml2=2.9.14=h22db469_4
- libzlib=1.2.13=h166bdaf_4
- llvm-openmp=15.0.6=he0ac6c6_0
- lz4-c=1.9.3=h9c3ff4c_1
- lzo=2.10=h516909a_1000
- mesalib=21.2.5=h0e4506f_3
- metis=5.1.0=h58526e2_1006
- mpfr=4.1.0=h9202a9a_1
- multiview=asp3.2.0=py39h3fd9d12_0
- mysql=8.0.31=h3e2b116_0
- mysql-client=8.0.31=hf89ab62_0
- mysql-common=8.0.31=haf5c9bc_0
- mysql-connector-c=6.1.11=h6eb9d5d_1007
- mysql-devel=8.0.31=haf5c9bc_0
- mysql-libs=8.0.31=h28c427c_0
- mysql-server=8.0.31=hb01f15f_0
- nanoflann=1.4.2=ha770c72_0
- ncurses=6.3=h27087fc_1
- nettle=3.8.1=hc379101_1
- networkx=2.8.8=pyhd8ed1ab_0
- nlohmann_json=3.11.2=h27087fc_0
- nn=1.86.0=h14c3975_2003
- nspr=4.35=h27087fc_0
- nss=3.82=he02c5a1_0
- numpy=1.23.5=py39h3d75532_0
- openblas=0.3.21=pthreads_h320a7e8_3
- opencv=4.6.0=py39hf3d152e_4
- openexr=2.5.5=hf817b99_0
- openh264=2.3.0=h27087fc_0
- openjpeg=2.3.0=hf38bd82_1003
- openssl=1.1.1s=h0b41bf4_1
- p11-kit=0.24.1=hc5aa10d_0
- parallel=20221122=ha770c72_0
- pbzip2=1.1.13=0
- pcl=1.11.1=h05311af_1
- pcre2=10.40=hc3806b6_0
- perl=5.32.1=2_h7f98852_perl5
- pip=22.3.1=pyhd8ed1ab_0
- pixman=0.40.0=h36c2ea0_0
- portaudio=19.6.0=h57a0ea0_5
- proj=9.1.0=h93bde94_0
- protobuf=4.21.11=py39h227be39_0
- pthread-stubs=0.4=h36c2ea0_1001
- pulseaudio=14.0=h7f54b18_8
- pvl=1.3.2=pyhd8ed1ab_0
- py-opencv=4.6.0=py39hef51801_4
- python=3.9.15=h47a2c10_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.9=3_cp39
- pytz=2022.6=pyhd8ed1ab_0
- pyyaml=6.0=py39hb9d737c_5
- qhull=2020.2=h4bd325d_2
- qt=5.15.4=hf11cfaa_0
- qt-main=5.15.4=ha5833f6_2
- qt-webengine=5.15.4=hcbadb6c_3
- qwt=6.2.0=h1d9fb53_4
- rapidjson=1.1.0=he1b5a44_1002
- rclone=1.61.1=h519d9b9_0
- readline=8.1.2=h0f457ee_0
- rhash=1.4.3=h166bdaf_0
- rocksdb=6.13.3=hda8cf21_2
- s2p-subset=isis7=h3fd9d12_0
- scipy=1.9.3=py39hddc5342_2
- setuptools=65.5.1=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- snappy=1.1.9=hbd366e4_2
- spiceypy=5.1.2=pyhd8ed1ab_0
- sqlite=3.40.0=h4ff8645_0
- stereo-pipeline=3.2.0=h3fd9d12_0
- suitesparse=5.10.1=h9e50725_1
- superlu=5.2.2=h00795ac_0
- svt-av1=1.2.1=h27087fc_0
- sysroot_linux-64=2.17=h4a8ded7_13
- tbb=2021.7.0=h924138e_1
- tbb-devel=2021.7.0=h924138e_1
- tk=8.6.12=h27826a3_0
- tnt=126=0
- tzdata=2022g=h191b570_0
- usgscsm=1.6.0=h924138e_1
- visionworkbench=3.2.0=h3fd9d12_0
- wheel=0.38.4=pyhd8ed1ab_0
- x264=1!164.3095=h166bdaf_2
- x265=3.5=h924138e_3
- xcb-util=0.4.0=h166bdaf_0
- xcb-util-image=0.4.0=h166bdaf_0
- xcb-util-keysyms=0.4.0=h166bdaf_0
- xcb-util-renderutil=0.3.9=h166bdaf_0
- xcb-util-wm=0.4.1=h166bdaf_0
- xerces-c=3.2.3=h55805fa_5
- xorg-damageproto=1.2.1=h7f98852_1002
- xorg-fixesproto=5.0=h7f98852_1002
- xorg-glproto=1.4.17=h7f98852_1002
- xorg-inputproto=2.3.2=h7f98852_1002
- xorg-kbproto=1.0.7=h7f98852_1002
- xorg-libice=1.0.10=h7f98852_0
- xorg-libsm=1.2.3=hd9c2040_1000
- xorg-libx11=1.7.2=h7f98852_0
- xorg-libxau=1.0.9=h7f98852_0
- xorg-libxdamage=1.1.5=h7f98852_1
- xorg-libxdmcp=1.1.3=h7f98852_0
- xorg-libxext=1.3.4=h7f98852_1
- xorg-libxfixes=5.0.3=h7f98852_1004
- xorg-libxi=1.7.10=h7f98852_0
- xorg-libxrandr=1.5.2=h7f98852_1
- xorg-libxrender=0.9.10=h7f98852_1003
- xorg-randrproto=1.5.0=h7f98852_1001
- xorg-renderproto=0.11.1=h7f98852_1002
- xorg-util-macros=1.19.3=h7f98852_0
- xorg-xextproto=7.3.0=h7f98852_1002
- xorg-xf86vidmodeproto=2.3.1=h7f98852_1002
- xorg-xproto=7.0.31=h7f98852_1007
- xz=5.2.6=h166bdaf_0
- yaml=0.2.5=h7f98852_2
- zlib=1.2.13=h166bdaf_4
- zstd=1.5.2=h6239696_4
- mesa-libgl-cos6-x86_64
- xorg-libxmu
- mesalib
================================================
FILE: conda/asp_3.2.0_osx_env.yaml
================================================
name: asp
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- ale=0.8.8=py39h92daf61_1
- aom=3.4.0=hb486fe8_1
- armadillo=11.4.2=hffeb596_0
- arpack=3.7.0=hefb7bc6_2
- blas=2.116=openblas
- blas-devel=3.9.0=16_osx64_openblas
- boost=1.72.0=py39hb64e6f8_1
- boost-cpp=1.72.0=h179ae3a_7
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h0d85af4_4
- c-ares=1.18.1=h0d85af4_0
- ca-certificates=2022.12.7=h033912b_0
- cairo=1.16.0=h904041c_1014
- cctools_osx-64=973.0.1=hcc6d90d_11
- ceres-solver=1.14.0=h636452b_15
- clang=14.0.6=h694c41f_0
- clang-14=14.0.6=default_h55ffa42_0
- clang_osx-64=14.0.6=h3113cd8_4
- clangxx=14.0.6=default_h55ffa42_0
- cmake=3.15.5=h6c18c4b_0
- compiler-rt=14.0.6=h613da45_0
- compiler-rt_osx-64=14.0.6=h8d5cb93_0
- csm=3.0.3.3=0
- cspice=67=hb7f2c08_4
- curl=7.86.0=h57eb407_1
- cyrus-sasl=2.1.27=ha724b88_5
- eigen=3.4.0=h940c156_0
- embree=2.16.0=h6834224_0
- expat=2.5.0=hf0c8a7f_0
- ffmpeg=4.4.2=gpl_h5a1d76f_107
- fftw=3.3.10=nompi_h4fa670e_106
- fgr=isis7=h01edc0c_0
- flann=1.9.1=h56de9e4_1011
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.14.1=h5bb23bf_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freetype=2.12.1=h3f81eb7_1
- geoid=1.0_isis7=1
- geos=3.7.3=h4a8c4bd_0
- geotiff=1.7.1=he29fd1c_4
- gettext=0.21.1=h8a4c099_0
- gflags=2.2.2=hb1e8313_1004
- gfortran_impl_osx-64=11.3.0=h1f927f5_27
- gfortran_osx-64=11.3.0=h18f7dce_0
- giflib=5.2.1=hbcb3906_2
- glib=2.74.1=hbc0c0cd_1
- glib-tools=2.74.1=hbc0c0cd_1
- glog=0.6.0=h8ac2a54_0
- gmp=6.2.1=h2e338ed_0
- gnutls=3.7.8=h207c4f0_0
- graphite2=1.3.13=h2e338ed_1001
- gsl=2.7=h93259b0_0
- gst-plugins-base=1.20.3=h37e1711_2
- gstreamer=1.20.3=h1d18e73_2
- harfbuzz=5.3.0=h08f8713_0
- hdf5=1.12.2=nompi_hc782337_100
- htdp=1.0_isis7=1
- icu=70.1=h96cf925_0
- ilmbase=2.5.5=hfab91a5_0
- inja=3.3.0=he49afe7_0
- isis=7.1.0=0
- isl=0.25=hb486fe8_0
- jama=125=0
- jasper=2.0.33=h013e400_0
- jemalloc=5.2.1=he49afe7_6
- jpeg=9e=hac89ed1_2
- kakadu=1=0
- krb5=1.19.3=hb49756b_0
- lame=3.100=hb7f2c08_1003
- laszip=2.1.0_isis7=h01edc0c_1
- ld64_osx-64=609=hfd63004_11
- lerc=4.0.0=hb486fe8_0
- libblas=3.9.0=16_osx64_openblas
- libcblas=3.9.0=16_osx64_openblas
- libclang=14.0.6=default_h55ffa42_0
- libclang-cpp14=14.0.6=default_h55ffa42_0
- libclang13=14.0.6=default_hb5731bd_0
- libcurl=7.86.0=h57eb407_1
- libcxx=14.0.6=hccf4f1f_0
- libdeflate=1.14=hb7f2c08_0
- libedit=3.1.20191231=h0678c8f_2
- libelas=isis7=h01edc0c_0
- libev=4.33=haf1e3a3_1
- libevent=2.1.10=h815e4d9_4
- libffi=3.4.2=h0d85af4_5
- libgdal=3.5_isis7=h01edc0c_0
- libgfortran=5.0.0=9_5_0_h97931a8_26
- libgfortran-devel_osx-64=11.3.0=h824d247_27
- libgfortran5=11.3.0=h082f757_26
- libglib=2.74.1=h4c723e1_1
- libiconv=1.17=hac89ed1_0
- libidn2=2.3.4=hb7f2c08_0
- libjemalloc=5.2.1=he49afe7_6
- liblapack=3.9.0=16_osx64_openblas
- liblapacke=3.9.0=16_osx64_openblas
- liblas=1.8.2_isis7=h01edc0c_0
- libllvm14=14.0.6=h5b596cc_1
- libnabo=isis7=h01edc0c_0
- libnghttp2=1.47.0=h7cbc4dc_1
- libntlm=1.4=h0d85af4_1002
- libogg=1.3.4=h35c211d_1
- libopenblas=0.3.21=openmp_h429af6e_3
- libopencv=4.6.0=py39h743a0d3_4
- libopus=1.3.1=hc929b4f_1
- libpng=1.6.39=ha978bb4_0
- libpointmatcher=isis7=ha5a8b8e_0
- libpq=14.5=h4aa9af9_3
- libprotobuf=3.21.11=hbc0c0cd_0
- libsqlite=3.40.0=ha978bb4_0
- libssh2=1.10.0=h7535e13_3
- libtasn1=4.19.0=hb7f2c08_0
- libtiff=4.4.0=hdb44e8a_4
- libunistring=0.9.10=h0d85af4_0
- libuv=1.44.2=hac89ed1_0
- libvorbis=1.3.7=h046ec9c_0
- libvpx=1.11.0=he49afe7_3
- libwebp=1.2.4=hfa4350a_0
- libwebp-base=1.2.4=h775f41a_0
- libxcb=1.13=h0d85af4_1004
- libxml2=2.9.14=hea49891_4
- libzlib=1.2.13=hfd90126_4
- llvm-openmp=15.0.6=h61d9ccf_0
- llvm-tools=14.0.6=h5b596cc_1
- lz4-c=1.9.3=he49afe7_1
- macports-legacy-support=1.0.7=hb7f2c08_0
- mesalib=21.2.5=h2df1e00_3
- metis=5.1.0=h2e338ed_1006
- mpc=1.2.1=hbb51d92_0
- mpfr=4.1.0=h0f52abe_1
- multiview=asp3.2.0=py39h01edc0c_0
- mysql=8.0.31=h57ddcff_0
- mysql-client=8.0.31=hbbbc359_0
- mysql-common=8.0.31=h7ebae80_0
- mysql-connector-c=6.1.11=h0f02589_1007
- mysql-devel=8.0.31=h7ebae80_0
- mysql-libs=8.0.31=hc37e033_0
- mysql-server=8.0.31=ha134c4c_0
- nanoflann=1.4.2=h694c41f_0
- ncurses=6.3=h96cf925_1
- nettle=3.8.1=h96f3785_1
- networkx=2.8.8=pyhd8ed1ab_0
- nlohmann_json=3.11.2=hbbd2c75_0
- nn=1.86.0=h1de35cc_2003
- nspr=4.35=hea0b92c_0
- nss=3.78=ha8197d3_0
- numpy=1.23.5=py39hdfa1d0c_0
- openblas=0.3.21=openmp_hbefa662_3
- opencv=4.6.0=py39h6e9494a_4
- openexr=2.5.5=h7fa7ffa_0
- openh264=2.3.0=hb486fe8_0
- openjpeg=2.3.0=h3bf0609_1003
- openssl=1.1.1s=hfd90126_1
- p11-kit=0.24.1=h65f8906_0
- parallel=20221122=h694c41f_0
- pbzip2=1.1.13=h9d27c22_1
- pcl=1.11.1=h7984e4d_1
- pcre2=10.40=h1c4e4bc_0
- perl=5.32.1=2_h0d85af4_perl5
- pip=22.3.1=pyhd8ed1ab_0
- pixman=0.40.0=hbcb3906_0
- proj=9.1.0=hcbd9701_0
- protobuf=4.21.11=py39h7a8716b_0
- pthread-stubs=0.4=hc929b4f_1001
- pvl=1.3.2=pyhd8ed1ab_0
- py-opencv=4.6.0=py39h71a6800_4
- python=3.9.15=h531fd05_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.9=3_cp39
- pytz=2022.6=pyhd8ed1ab_0
- pyyaml=6.0=py39ha30fb19_5
- qhull=2020.2=h940c156_2
- qt=5.15.4=hb3ad848_0
- qt-main=5.15.4=h938c29d_2
- qt-webengine=5.15.4=h72ca1e5_3
- qwt=6.2.0=h4cc5820_4
- rapidjson=1.1.0=hb1e8313_1002
- readline=8.1.2=h3899abd_0
- rhash=1.4.3=hac89ed1_0
- rocksdb=6.13.3=hbb73eaa_2
- s2p-subset=isis7=h01edc0c_0
- scipy=1.9.3=py39h8a15683_2
- setuptools=65.5.1=pyhd8ed1ab_0
- sigtool=0.1.3=h57ddcff_0
- six=1.16.0=pyh6c4a22f_0
- snappy=1.1.9=h225ccf5_2
- spiceypy=5.1.2=pyhd8ed1ab_0
- sqlite=3.40.0=h9ae0607_0
- stereo-pipeline=3.2.0=h01edc0c_0
- suitesparse=5.10.1=h7aff33d_1
- superlu=5.2.2=h1f0f902_0
- svt-av1=1.2.1=hbbd2c75_0
- tapi=1100.0.11=h9ce4665_0
- tbb=2021.7.0=hb8565cd_1
- tbb-devel=2021.7.0=hb8565cd_1
- tk=8.6.12=h5dbffcc_0
- tnt=126=0
- tzdata=2022g=h191b570_0
- usgscsm=1.6.0=hb8565cd_1
- visionworkbench=3.2.0=h01edc0c_0
- wheel=0.38.4=pyhd8ed1ab_0
- x264=1!164.3095=h775f41a_2
- x265=3.5=hbb4e6a2_3
- xerces-c=3.2.3=hf5b2a72_5
- xorg-damageproto=1.2.1=h0d85af4_1002
- xorg-fixesproto=5.0=h0d85af4_1002
- xorg-glproto=1.4.17=h0d85af4_1002
- xorg-inputproto=2.3.2=h35c211d_1002
- xorg-kbproto=1.0.7=h35c211d_1002
- xorg-libice=1.0.10=h0d85af4_0
- xorg-libsm=1.2.3=h0d85af4_1000
- xorg-libx11=1.7.2=h0d85af4_0
- xorg-libxau=1.0.9=h35c211d_0
- xorg-libxdamage=1.1.5=h0d85af4_1
- xorg-libxdmcp=1.1.3=h35c211d_0
- xorg-libxext=1.3.4=h0d85af4_1
- xorg-libxfixes=5.0.3=h0d85af4_1004
- xorg-libxi=1.7.10=h0d85af4_0
- xorg-libxrandr=1.5.2=h0d85af4_1
- xorg-libxrender=0.9.10=h0d85af4_1003
- xorg-randrproto=1.5.0=h0d85af4_1001
- xorg-renderproto=0.11.1=h0d85af4_1002
- xorg-util-macros=1.19.3=h35c211d_0
- xorg-xextproto=7.3.0=h35c211d_1002
- xorg-xf86vidmodeproto=2.3.1=h0d85af4_1002
- xorg-xproto=7.0.31=h35c211d_1007
- xz=5.2.6=h775f41a_0
- yaml=0.2.5=h0d85af4_2
- zlib=1.2.13=hfd90126_4
- zstd=1.5.2=hfa58983_4
================================================
FILE: conda/asp_3.3.0_linux_env.yaml
================================================
name: asp
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_kmp_llvm
- _sysroot_linux-64_curr_repodata_hack=3=h69a702a_13
- ale=0.9.1=py39h7633fee_0
- alsa-lib=1.2.7.2=h166bdaf_0
- aom=3.5.0=h27087fc_0
- armadillo=12.6.1=h0a193a4_0
- arpack=3.7.0=hdefa2d7_2
- blas=2.117=openblas
- blas-devel=3.9.0=17_linux64_openblas
- boost=1.72.0=py39ha90915f_1
- boost-cpp=1.72.0=h359cf19_6
- brotli-python=1.1.0=py39h3d6467e_0
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h7f98852_4
- c-ares=1.19.1=hd590300_0
- ca-certificates=2023.7.22=hbcca054_0
- cairo=1.16.0=ha12eb4b_1010
- ceres-solver=1.14.0=hf302a74_15
- certifi=2023.7.22=pyhd8ed1ab_0
- charset-normalizer=3.2.0=pyhd8ed1ab_0
- chrpath=0.16=h7f98852_1002
- csm=3.0.3.3=hc9558a2_0
- cspice=67=h166bdaf_4
- curl=7.87.0=h6312ad2_0
- cyrus-sasl=2.1.27=h957375c_6
- dbus=1.13.6=h5008d03_3
- eigen=3.4.0=h00ab1b0_0
- elfutils=0.188=hbb17bd0_0
- embree=2.17.7=ha770c72_3
- expat=2.5.0=hcb278e6_1
- ffmpeg=4.4.2=gpl_hbd009f3_109
- fftw=3.3.10=nompi_hc118613_108
- fgr=isis7=h3fd9d12_0
- flann=1.9.1=hfe772e8_1010
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.14.2=h14ed4e7_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freeglut=3.2.2=h9c3ff4c_1
- freetype=2.12.1=hca18f0e_1
- geoid=1.0_isis7=1
- geos=3.9.1=h9c3ff4c_2
- geotiff=1.7.1=ha76d385_4
- gettext=0.21.1=h27087fc_0
- gflags=2.2.2=he1b5a44_1004
- giflib=5.2.1=h0b41bf4_3
- glib=2.76.4=hfc55251_0
- glib-tools=2.76.4=hfc55251_0
- glog=0.6.0=h6f12383_0
- gmp=6.2.1=h58526e2_0
- gnutls=3.7.8=hf3e180e_0
- graphite2=1.3.13=h58526e2_1001
- gsl=2.6=he838d99_2
- gst-plugins-base=1.20.3=h57caac4_2
- gstreamer=1.20.3=hd4edc92_2
- harfbuzz=4.2.0=h40b6f09_0
- hdf5=1.12.1=nompi_h2386368_104
- htdp=1.0_isis7=1
- icu=69.1=h9c3ff4c_0
- idna=3.4=pyhd8ed1ab_0
- ilmbase=2.5.5=h780b84a_0
- inja=3.3.0=h9c3ff4c_0
- isis=8.0.0=np125_0
- jama=125=0
- jasper=2.0.33=h0ff4b12_1
- jemalloc=5.3.0=hcb278e6_0
- jpeg=9e=h0b41bf4_3
- kakadu=1=0
- kernel-headers_linux-64=3.10.0=h4a8ded7_13
- keyutils=1.6.1=h166bdaf_0
- krb5=1.20.1=hf9c8cef_0
- lame=3.100=h166bdaf_1003
- laszip=2.1.0_isis7=h3fd9d12_1
- ld_impl_linux-64=2.40=h41732ed_0
- lerc=4.0.0=h27087fc_0
- libarchive=3.5.2=hb890918_3
- libblas=3.9.0=17_linux64_openblas
- libcblas=3.9.0=17_linux64_openblas
- libclang=13.0.1=default_h7634d5b_3
- libcurl=7.87.0=h6312ad2_0
- libcxx=16.0.6=h00ab1b0_0
- libcxxabi=16.0.6=ha770c72_0
- libdeflate=1.14=h166bdaf_0
- libdrm=2.4.114=h166bdaf_0
- libdrm-cos6-x86_64=2.4.65=4
- libedit=3.1.20191231=he28a2e2_2
- libelas=isis7=h3fd9d12_0
- libev=4.33=h516909a_1
- libevent=2.1.10=h9b69904_4
- libexpat=2.5.0=hcb278e6_1
- libffi=3.4.2=h7f98852_5
- libgcc-ng=13.1.0=he5830b7_0
- libgdal=3.5_isis8=h3fd9d12_0
- libgfortran-ng=13.1.0=h69a702a_0
- libgfortran5=13.1.0=h15d22d2_0
- libglib=2.76.4=hebfc3b9_0
- libglu=9.0.0=he1b5a44_1001
- libglvnd-cos7-x86_64=1.0.1=h9b0a68f_1105
- libhwloc=2.8.0=h32351e8_1
- libiconv=1.17=h166bdaf_0
- libidn2=2.3.4=h166bdaf_0
- libjemalloc=5.3.0=hcb278e6_0
- liblapack=3.9.0=17_linux64_openblas
- liblapacke=3.9.0=17_linux64_openblas
- liblas=1.8.2_isis8=h3fd9d12_0
- libllvm13=13.0.1=hf817b99_2
- libmicrohttpd=0.9.77=h97afed2_0
- libnabo=isis7=h3fd9d12_0
- libnghttp2=1.51.0=hdcd2b5c_0
- libnsl=2.0.0=h7f98852_0
- libntlm=1.4=h7f98852_1002
- libogg=1.3.4=h7f98852_1
- libopenblas=0.3.23=pthreads_h80387f5_0
- libopencv=4.5.5=py39hb0e02d1_7
- libopus=1.3.1=h7f98852_1
- libpciaccess=0.17=h166bdaf_0
- libpng=1.6.39=h753d276_0
- libpointmatcher=isis7=h2bc3f7f_0
- libpq=14.5=h2baec63_5
- libprotobuf=3.19.6=h3eb15da_0
- libsqlite=3.43.0=h2797004_0
- libssh2=1.10.0=haa6b8db_3
- libstdcxx-ng=13.1.0=hfd8a6a1_0
- libtasn1=4.19.0=h166bdaf_0
- libtiff=4.4.0=h82bc61c_5
- libunistring=0.9.10=h7f98852_0
- libuuid=2.38.1=h0b41bf4_0
- libva=2.18.0=h0b41bf4_0
- libvorbis=1.3.7=h9c3ff4c_0
- libvpx=1.11.0=h9c3ff4c_3
- libwebp-base=1.3.1=hd590300_0
- libx11-common-cos6-x86_64=1.6.4=4
- libx11-cos6-x86_64=1.6.4=4
- libxcb=1.13=h7f98852_1004
- libxkbcommon=1.0.3=he3ba5ed_0
- libxml2=2.9.14=haae042b_4
- libzlib=1.2.13=hd590300_5
- llvm-openmp=16.0.6=h4dfa4b3_0
- lz4-c=1.9.3=h9c3ff4c_1
- lzo=2.10=h516909a_1000
- mesa-libgl-cos6-x86_64=11.0.7=4
- mesalib=23.0.0=h0fe20ba_0
- metis=5.1.0=h59595ed_1007
- mpfr=4.2.0=hb012696_0
- multiview=isis8=py39h3fd9d12_0
- mysql=8.0.28=h3e2b116_2
- mysql-client=8.0.28=hf89ab62_2
- mysql-common=8.0.28=haf5c9bc_2
- mysql-connector-c=6.1.11=h6eb9d5d_1007
- mysql-devel=8.0.28=haf5c9bc_2
- mysql-libs=8.0.28=h28c427c_2
- mysql-server=8.0.28=hb253900_2
- nanoflann=1.4.2=ha770c72_0
- ncurses=6.4=hcb278e6_0
- nettle=3.8.1=hc379101_1
- networkx=3.1=pyhd8ed1ab_0
- nlohmann_json=3.11.2=h27087fc_0
- nn=1.86.0=hd590300_2003
- nspr=4.35=h27087fc_0
- nss=3.92=h1d7d5a4_0
- numpy=1.25.2=py39h6183b62_0
- openblas=0.3.23=pthreads_h855a84d_0
- opencv=4.5.5=py39hf3d152e_7
- openexr=2.5.5=hf817b99_0
- openh264=2.3.1=hcb278e6_2
- openjpeg=2.3.0=hf38bd82_1003
- openssl=1.1.1v=hd590300_0
- p11-kit=0.24.1=hc5aa10d_0
- packaging=23.1=pyhd8ed1ab_0
- parallel=20230722=ha770c72_0
- pbzip2=1.1.13=0
- pcl=1.11.1=h05311af_1
- pcre2=10.40=hc3806b6_0
- perl=5.32.1=4_hd590300_perl5
- pip=23.2.1=pyhd8ed1ab_0
- pixman=0.40.0=h36c2ea0_0
- platformdirs=3.10.0=pyhd8ed1ab_0
- pooch=1.7.0=pyha770c72_3
- proj=9.1.0=h93bde94_0
- protobuf=3.19.6=py39h227be39_0
- pthread-stubs=0.4=h36c2ea0_1001
- pvl=1.3.2=pyhd8ed1ab_0
- py-opencv=4.5.5=py39hef51801_7
- pysocks=1.7.1=pyha2e5f31_6
- python=3.9.15=h47a2c10_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.9=3_cp39
- pytz=2023.3.post1=pyhd8ed1ab_0
- pyyaml=6.0.1=py39hd1e30aa_0
- qhull=2020.2=h4bd325d_2
- qt=5.12.9=h1304e3e_6
- qwt=6.2.0=hb19a904_2
- rapidjson=1.1.0=he1b5a44_1002
- rclone=1.63.1=h519d9b9_0
- readline=8.2=h8228510_1
- requests=2.31.0=pyhd8ed1ab_0
- rocksdb=6.13.3=hda8cf21_2
- s2p-subset=isis7=h3fd9d12_0
- scipy=1.11.2=py39h6183b62_0
- setuptools=68.1.2=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- spiceypy=6.0.0=pyhd8ed1ab_0
- sqlite=3.43.0=h2c6b66d_0
- stereo-pipeline=3.3.0=h3fd9d12_0
- suitesparse=5.10.1=h9e50725_1
- superlu=5.2.2=h00795ac_0
- svt-av1=1.3.0=h27087fc_0
- sysroot_linux-64=2.17=h4a8ded7_13
- tbb=2021.7.0=h924138e_1
- tbb-devel=2021.7.0=h924138e_1
- tk=8.6.12=h27826a3_0
- tnt=126=0
- typing-extensions=4.7.1=hd8ed1ab_0
- typing_extensions=4.7.1=pyha770c72_0
- tzdata=2023c=h71feb2d_0
- urllib3=2.0.4=pyhd8ed1ab_0
- usgscsm=1.7.0=h00ab1b0_1
- visionworkbench=3.3.0=h3fd9d12_0
- wheel=0.41.2=pyhd8ed1ab_0
- x264=1!164.3095=h166bdaf_2
- x265=3.5=h924138e_3
- xerces-c=3.2.3=h8ce2273_4
- xorg-damageproto=1.2.1=h7f98852_1002
- xorg-fixesproto=5.0=h7f98852_1002
- xorg-glproto=1.4.17=h7f98852_1002
- xorg-inputproto=2.3.2=h7f98852_1002
- xorg-kbproto=1.0.7=h7f98852_1002
- xorg-libice=1.1.1=hd590300_0
- xorg-libsm=1.2.4=h7391055_0
- xorg-libx11=1.8.4=h0b41bf4_0
- xorg-libxau=1.0.11=hd590300_0
- xorg-libxdamage=1.1.5=h7f98852_1
- xorg-libxdmcp=1.1.3=h7f98852_0
- xorg-libxext=1.3.4=h0b41bf4_2
- xorg-libxfixes=5.0.3=h7f98852_1004
- xorg-libxi=1.7.10=h7f98852_0
- xorg-libxrandr=1.5.2=h7f98852_1
- xorg-libxrender=0.9.10=h7f98852_1003
- xorg-randrproto=1.5.0=h7f98852_1001
- xorg-renderproto=0.11.1=h7f98852_1002
- xorg-util-macros=1.19.3=h7f98852_0
- xorg-xextproto=7.3.0=h0b41bf4_1003
- xorg-xf86vidmodeproto=2.3.1=h7f98852_1002
- xorg-xproto=7.0.31=h7f98852_1007
- xz=5.2.6=h166bdaf_0
- yaml=0.2.5=h7f98852_2
- zlib=1.2.13=hd590300_5
- zstd=1.5.5=hfc55251_0
================================================
FILE: conda/asp_3.3.0_osx_env.yaml
================================================
name: asp
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- ale=0.9.1=py39h8ee36c8_0
- aom=3.5.0=hf0c8a7f_0
- armadillo=12.6.1=hdc495e4_0
- arpack=3.7.0=hefb7bc6_2
- blas=2.117=openblas
- blas-devel=3.9.0=17_osx64_openblas
- boost=1.72.0=py39hb64e6f8_1
- boost-cpp=1.72.0=hf3dc895_6
- brotli-python=1.1.0=py39h840bb9f_0
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h0d85af4_4
- c-ares=1.19.1=h0dc2134_0
- ca-certificates=2023.7.22=h8857fd0_0
- cairo=1.16.0=h9e0e54b_1010
- ceres-solver=1.14.0=h636452b_15
- certifi=2023.7.22=pyhd8ed1ab_0
- charset-normalizer=3.2.0=pyhd8ed1ab_0
- csm=3.0.3.3=0
- cspice=67=hb7f2c08_4
- curl=8.2.1=h5f667d7_0
- cyrus-sasl=2.1.27=hf9bab2b_7
- eigen=3.4.0=h1c7c39f_0
- embree=2.17.7=h694c41f_3
- expat=2.5.0=hf0c8a7f_1
- ffmpeg=4.4.2=gpl_hff0bab5_109
- fftw=3.3.10=nompi_h4fa670e_108
- fgr=isis7=h01edc0c_0
- flann=1.9.1=h30321d8_1010
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.14.2=h5bb23bf_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freetype=2.12.1=h3f81eb7_1
- geoid=1.0_isis7=1
- geos=3.9.1=he49afe7_2
- geotiff=1.7.1=he29fd1c_4
- gettext=0.21.1=h8a4c099_0
- gflags=2.2.2=hb1e8313_1004
- giflib=5.2.1=hb7f2c08_3
- glog=0.6.0=h8ac2a54_0
- gmp=6.2.1=h2e338ed_0
- gnutls=3.7.8=h207c4f0_0
- graphite2=1.3.13=h2e338ed_1001
- gsl=2.6=h71c5fe9_2
- harfbuzz=4.2.0=h48644e2_0
- hdf5=1.12.1=nompi_h0aa1fa2_104
- htdp=1.0_isis7=1
- icu=69.1=he49afe7_0
- idna=3.4=pyhd8ed1ab_0
- ilmbase=2.5.5=hfab91a5_0
- inja=3.3.0=he49afe7_0
- isis=8.0.0=np125_0
- jama=125=0
- jasper=2.0.33=h7c6fec8_1
- jemalloc=5.3.0=hf0c8a7f_0
- jpeg=9e=hb7f2c08_3
- kakadu=1=0
- krb5=1.21.1=hb884880_0
- lame=3.100=hb7f2c08_1003
- laszip=2.1.0_isis7=h01edc0c_1
- lerc=4.0.0=hb486fe8_0
- libblas=3.9.0=17_osx64_openblas
- libcblas=3.9.0=17_osx64_openblas
- libclang=13.0.1=root_62804_h2961583_3
- libcurl=8.2.1=h5f667d7_0
- libcxx=16.0.6=hd57cbcb_0
- libdeflate=1.14=hb7f2c08_0
- libedit=3.1.20191231=h0678c8f_2
- libelas=isis7=h01edc0c_0
- libev=4.33=haf1e3a3_1
- libevent=2.1.10=h7d65743_4
- libexpat=2.5.0=hf0c8a7f_1
- libffi=3.4.2=h0d85af4_5
- libgdal=3.5_isis8=hf8dc8b4_0
- libgfortran=5.0.0=12_3_0_h97931a8_1
- libgfortran5=12.3.0=hbd3c1fe_1
- libglib=2.76.4=hc62aa5d_0
- libiconv=1.17=hac89ed1_0
- libidn2=2.3.4=hb7f2c08_0
- libjemalloc=5.3.0=hf0c8a7f_0
- liblapack=3.9.0=17_osx64_openblas
- liblapacke=3.9.0=17_osx64_openblas
- liblas=1.8.2_isis8=hf8dc8b4_0
- libllvm13=13.0.1=h64f94b2_2
- libnabo=isis7=h01edc0c_0
- libnghttp2=1.52.0=he2ab024_0
- libntlm=1.4=h0d85af4_1002
- libopenblas=0.3.23=openmp_h429af6e_0
- libopencv=4.5.5=py39hc2bf5a6_7
- libpng=1.6.39=ha978bb4_0
- libpointmatcher=isis7=ha5a8b8e_0
- libpq=14.5=h3df487d_7
- libprotobuf=3.19.6=hbc0c0cd_0
- libsqlite=3.42.0=h58db7d2_0
- libssh2=1.11.0=hd019ec5_0
- libtasn1=4.19.0=hb7f2c08_0
- libtiff=4.4.0=h6268bbc_5
- libunistring=0.9.10=h0d85af4_0
- libvpx=1.11.0=he49afe7_3
- libwebp-base=1.3.1=h0dc2134_0
- libxcb=1.15=hb7f2c08_0
- libxml2=2.9.14=h1faee8b_4
- libzlib=1.2.13=h8a1eda9_5
- llvm-openmp=16.0.6=hff08bdf_0
- lz4-c=1.9.3=he49afe7_1
- macports-legacy-support=1.0.13=h0dc2134_0
- mesalib=23.1.4=hb59017c_0
- metis=5.1.0=he965462_1007
- mpfr=4.2.0=h4f9bd69_0
- multiview=isis8=py39hf8dc8b4_0
- mysql=8.0.28=h88f4db0_2
- mysql-client=8.0.28=h7ddd48c_2
- mysql-common=8.0.28=hdd8d184_2
- mysql-devel=8.0.28=hdd8d184_2
- mysql-libs=8.0.28=h353f102_2
- mysql-server=8.0.28=h6edde1b_2
- nanoflann=1.4.2=h694c41f_0
- ncurses=6.4=hf0c8a7f_0
- nettle=3.8.1=h96f3785_1
- networkx=3.1=pyhd8ed1ab_0
- nlohmann_json=3.11.2=hbbd2c75_0
- nn=1.86.0=h0dc2134_2003
- nspr=4.35=hea0b92c_0
- nss=3.92=hd6ac835_0
- numpy=1.25.2=py39h892e69a_0
- openblas=0.3.23=openmp_hbefa662_0
- opencv=4.5.5=py39h6e9494a_7
- openexr=2.5.5=h7fa7ffa_0
- openh264=2.3.1=hf0c8a7f_2
- openjpeg=2.3.0=h3bf0609_1003
- openssl=3.1.2=h8a1eda9_0
- p11-kit=0.24.1=h65f8906_0
- packaging=23.1=pyhd8ed1ab_0
- parallel=20230722=h694c41f_0
- pbzip2=1.1.13=h9d27c22_1
- pcl=1.11.1=h7984e4d_1
- pcre2=10.40=h1c4e4bc_0
- perl=5.32.1=4_h0dc2134_perl5
- pip=23.2.1=pyhd8ed1ab_0
- pixman=0.40.0=hbcb3906_0
- platformdirs=3.10.0=pyhd8ed1ab_0
- pooch=1.7.0=pyha770c72_3
- proj=9.1.0=hcbd9701_0
- protobuf=3.19.6=py39h7a8716b_0
- pthread-stubs=0.4=hc929b4f_1001
- pvl=1.3.2=pyhd8ed1ab_0
- py-opencv=4.5.5=py39h71a6800_7
- pysocks=1.7.1=pyha2e5f31_6
- python=3.9.17=h07e1443_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.9=3_cp39
- pytz=2023.3.post1=pyhd8ed1ab_0
- pyyaml=6.0.1=py39hdc70f33_0
- qhull=2020.2=h940c156_2
- qt=5.12.9=h2a607e2_5
- qwt=6.2.0=h0f3c874_2
- rapidjson=1.1.0=hb1e8313_1002
- rclone=1.63.1=h1762f63_0
- readline=8.2=h9e318b2_1
- requests=2.31.0=pyhd8ed1ab_0
- rocksdb=6.13.3=hbb73eaa_2
- s2p-subset=isis7=h01edc0c_0
- scipy=1.11.2=py39hded996c_0
- setuptools=68.1.2=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- snappy=1.1.10=h225ccf5_0
- spiceypy=6.0.0=pyhd8ed1ab_0
- sqlite=3.42.0=h2b0dec6_0
- stereo-pipeline=3.3.0=hf8dc8b4_0
- suitesparse=5.10.1=h7aff33d_1
- superlu=5.2.2=h1f0f902_0
- svt-av1=1.3.0=hf0c8a7f_0
- tbb=2021.10.0.custom_asp=h6b95b14_0
- tbb-devel=2021.10.0.custom_asp=h6b95b14_0
- tk=8.6.12=h5dbffcc_0
- tnt=126=0
- typing-extensions=4.7.1=hd8ed1ab_0
- typing_extensions=4.7.1=pyha770c72_0
- tzdata=2023c=h71feb2d_0
- urllib3=2.0.4=pyhd8ed1ab_0
- usgscsm=1.7.0=h1c7c39f_1
- visionworkbench=3.3.0=hf8dc8b4_0
- wheel=0.41.2=pyhd8ed1ab_0
- x264=1!164.3095=h775f41a_2
- x265=3.5=hbb4e6a2_3
- xerces-c=3.2.3=h6564042_4
- xorg-damageproto=1.2.1=h0d85af4_1002
- xorg-fixesproto=5.0=h0d85af4_1002
- xorg-glproto=1.4.17=h0d85af4_1002
- xorg-inputproto=2.3.2=h35c211d_1002
- xorg-kbproto=1.0.7=h35c211d_1002
- xorg-libice=1.1.1=h0dc2134_0
- xorg-libsm=1.2.4=h0dc2134_0
- xorg-libx11=1.8.6=hbd0b022_0
- xorg-libxau=1.0.11=h0dc2134_0
- xorg-libxdamage=1.1.5=h0d85af4_1
- xorg-libxdmcp=1.1.3=h35c211d_0
- xorg-libxext=1.3.4=hb7f2c08_2
- xorg-libxfixes=5.0.3=h0d85af4_1004
- xorg-libxi=1.7.10=h0d85af4_0
- xorg-libxrandr=1.5.2=h0d85af4_1
- xorg-libxrender=0.9.11=h0dc2134_0
- xorg-randrproto=1.5.0=h0d85af4_1001
- xorg-renderproto=0.11.1=h0d85af4_1002
- xorg-util-macros=1.19.3=h35c211d_0
- xorg-xextproto=7.3.0=hb7f2c08_1003
- xorg-xf86vidmodeproto=2.3.1=h0d85af4_1002
- xorg-xproto=7.0.31=h35c211d_1007
- xz=5.2.6=h775f41a_0
- yaml=0.2.5=h0d85af4_2
- zlib=1.2.13=h8a1eda9_5
- zstd=1.5.5=h829000d_0
================================================
FILE: conda/asp_3.5.0_linux_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- nodefaults
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=3_kmp_llvm
- affine=2.4.0=pyhd8ed1ab_1
- aiohappyeyeballs=2.6.1=pyhd8ed1ab_0
- aiohttp=3.11.18=py311h2dc5d0c_0
- aiosignal=1.3.2=pyhd8ed1ab_0
- ale=0.10.0=py311hd18a35c_3
- alsa-lib=1.2.9=hd590300_0
- aom=3.7.1=h59595ed_0
- armadillo=14.0.2=h88fc5b9_0
- arpack=3.9.1=nompi_hf03ea27_102
- atk-1.0=2.38.0=hd4edc92_1
- attr=2.5.1=h166bdaf_1
- attrs=25.3.0=pyh71513ae_0
- aws-c-auth=0.7.4=h1083cbe_2
- aws-c-cal=0.6.2=h09139f6_2
- aws-c-common=0.9.3=hd590300_0
- aws-c-compression=0.2.17=h184a658_3
- aws-c-event-stream=0.3.2=h6fea174_2
- aws-c-http=0.7.13=hb59894b_2
- aws-c-io=0.13.33=h161b759_0
- aws-c-mqtt=0.9.7=h55cd26b_0
- aws-c-s3=0.3.17=hfb4bb88_4
- aws-c-sdkutils=0.1.12=h184a658_2
- aws-checksums=0.1.17=h184a658_2
- aws-crt-cpp=0.24.2=ha28989d_2
- aws-sdk-cpp=1.10.57=hec69fbc_24
- binutils=2.43=h4852527_4
- binutils_impl_linux-64=2.43=h4bf12b8_4
- binutils_linux-64=2.43=h4852527_4
- blas=2.131=openblas
- blas-devel=3.9.0=31_h1ea3ea9_openblas
- blosc=1.21.6=hef167b5_0
- boost=1.78.0=py311h92ebd52_5
- boost-cpp=1.78.0=h6582d0a_3
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h4bc722e_7
- c-ares=1.34.5=hb9d3cd8_0
- c-compiler=1.9.0=h2b85faf_0
- ca-certificates=2025.1.31=hbd8a1cb_1
- cached-property=1.5.2=hd8ed1ab_1
- cached_property=1.5.2=pyha770c72_1
- cairo=1.16.0=hbbf8b49_1016
- ceres-solver=2.2.0=h30ec75d_2
- certifi=2025.1.31=pyhd8ed1ab_0
- cfitsio=4.3.0=hbdc6101_0
- chrpath=0.16=h7f98852_1002
- cmake=3.27.9=hcfe8598_0
- compilers=1.9.0=ha770c72_0
- csm=3.0.3.3=h84d6215_1
- cspice=67=hb9d3cd8_8
- curl=8.4.0=hca28451_0
- cxx-compiler=1.9.0=h1a2810e_0
- cyrus-sasl=2.1.27=h54b06d7_7
- dav1d=1.2.1=hd590300_0
- dbus=1.13.6=h5008d03_3
- double-conversion=3.3.1=h5888daf_0
- eigen=3.4.0=h00ab1b0_0
- elfutils=0.189=hde5d1a3_0
- embree=3.13.0=habf647b_1
- expat=2.5.0=hcb278e6_1
- ffmpeg=6.1.0=gpl_h402741f_101
- fftw=3.3.10=nompi_hf1063bd_110
- fgr=asp3.5.0=h3fd9d12_0
- flann=1.9.1=h941a29b_1013
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=h77eed37_3
- fontconfig=2.14.2=h14ed4e7_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- fortran-compiler=1.9.0=h36df796_0
- freeglut=3.2.2=hac7e632_2
- freetype=2.13.3=ha770c72_1
- freexl=1.0.6=h166bdaf_1
- fribidi=1.0.10=h36c2ea0_0
- frozenlist=1.5.0=py311h2dc5d0c_1
- gcc=13.3.0=h9576a4e_2
- gcc_impl_linux-64=13.3.0=h1e990d8_2
- gcc_linux-64=13.3.0=hc28eda2_10
- gdk-pixbuf=2.42.10=h6b639ba_2
- geoid=1.0_isis7=1
- geos=3.12.0=h59595ed_0
- geotiff=1.7.1=h22adcc9_11
- gettext=0.23.1=h5888daf_0
- gettext-tools=0.23.1=h5888daf_0
- gflags=2.2.2=h5888daf_1005
- gfortran=13.3.0=h9576a4e_2
- gfortran_impl_linux-64=13.3.0=h84c1745_2
- gfortran_linux-64=13.3.0=hb919d3a_10
- giflib=5.2.2=hd590300_0
- gl2ps=1.4.2=hae5d5c5_1
- glew=2.1.0=h9c3ff4c_2
- glib=2.78.1=hfc55251_0
- glib-tools=2.78.1=hfc55251_0
- glog=0.7.1=hbabe93e_0
- gmp=6.3.0=hac33072_2
- gnutls=3.7.9=hb077bed_0
- graphite2=1.3.13=h59595ed_1003
- graphviz=8.1.0=h28d9a01_0
- greenlet=3.2.0=py311hfdbb021_0
- gsl=2.6=he838d99_2
- gst-plugins-base=1.22.5=hf7dbed1_0
- gstreamer=1.22.5=h98fc4e7_0
- gtk2=2.24.33=h90689f9_2
- gts=0.7.6=h977cf35_4
- gxx=13.3.0=h9576a4e_2
- gxx_impl_linux-64=13.3.0=hae580e1_2
- gxx_linux-64=13.3.0=h6834431_10
- h5py=3.9.0=nompi_py311he78b9b8_101
- harfbuzz=7.3.0=hdb3a94d_0
- hdf4=4.2.15=h501b40f_6
- hdf5=1.14.1=nompi_h4f84152_100
- icu=72.1=hcb278e6_0
- idna=3.10=pyhd8ed1ab_1
- ilmbase=2.5.5=h3fd9d12_1
- inja=3.3.0=h9c3ff4c_0
- isis=8.3.0=0
- jama=125=0
- jasper=4.0.0=h32699f2_1
- jinja2=3.1.6=pyhd8ed1ab_0
- json-c=0.17=h1220068_1
- jsoncpp=1.9.5=h4bd325d_1
- kakadu=1=0
- kalasiris=1.11.0=pyhd8ed1ab_1
- kealib=1.5.1=h3e6883b_4
- kernel-headers_linux-64=3.10.0=he073ed8_18
- keyutils=1.6.1=h166bdaf_0
- krb5=1.21.2=h659d440_0
- lame=3.100=h166bdaf_1003
- lcms2=2.15=haa2dc70_1
- ld_impl_linux-64=2.43=h712a8e2_4
- lerc=4.0.0=h0aef613_1
- libaec=1.1.3=h59595ed_0
- libarchive=3.6.2=h039dbb9_1
- libasprintf=0.23.1=h8e693c7_0
- libasprintf-devel=0.23.1=h8e693c7_0
- libass=0.17.1=hc9aadba_0
- libblas=3.9.0=31_h59b9bed_openblas
- libcap=2.71=h39aace5_0
- libcblas=3.9.0=31_he106b2a_openblas
- libclang=15.0.7=default_h127d8a8_5
- libclang13=15.0.7=default_h5d6823c_5
- libcups=2.3.3=h4637d8d_4
- libcurl=8.4.0=hca28451_0
- libcxx=19.1.7=h2713693_1
- libcxxabi=19.1.7=hd85fd95_1
- libdeflate=1.18=h0b41bf4_0
- libdrm=2.4.124=hb9d3cd8_0
- libdrm-cos7-x86_64=2.4.97=ha675448_1106
- libedit=3.1.20250104=pl5321h7949ede_0
- libelas=asp3.5.0=h3fd9d12_0
- libev=4.33=hd590300_2
- libevent=2.1.12=hf998b51_1
- libexpat=2.5.0=hcb278e6_1
- libffi=3.4.6=h2dba641_1
- libflac=1.4.3=h59595ed_0
- libfreetype=2.13.3=ha770c72_1
- libfreetype6=2.13.3=h48d6fc4_1
- libgcc=14.2.0=h767d61c_2
- libgcc-devel_linux-64=13.3.0=hc03c837_102
- libgcc-ng=14.2.0=h69a702a_2
- libgcrypt-lib=1.11.0=hb9d3cd8_2
- libgd=2.3.3=hfa28ad5_6
- libgdal=3.7.1=hd2ada2b_6
- libgettextpo=0.23.1=h5888daf_0
- libgettextpo-devel=0.23.1=h5888daf_0
- libgfortran=14.2.0=h69a702a_2
- libgfortran-ng=14.2.0=h69a702a_2
- libgfortran5=14.2.0=hf1ad2bd_2
- libglib=2.78.1=hebfc3b9_0
- libglu=9.0.0=hac7e632_1003
- libglvnd=1.7.0=ha4b6fd6_2
- libglvnd-cos7-x86_64=1.0.1=ha675448_1106
- libglvnd-glx-cos7-x86_64=1.0.1=ha675448_1106
- libgomp=14.2.0=h767d61c_2
- libgpg-error=1.54=hbd13f7d_0
- libhwloc=2.9.3=default_h554bfaf_1009
- libiconv=1.18=h4ce23a2_1
- libidn2=2.3.8=ha4ef2c3_0
- libjemalloc=5.3.0=h5888daf_1
- libjpeg-turbo=2.1.5.1=hd590300_1
- libkml=1.3.0=h01aab08_1016
- liblapack=3.9.0=31_h7ac8fdf_openblas
- liblapacke=3.9.0=31_he2f377e_openblas
- libllvm15=15.0.7=h5cf9203_3
- libllvm17=17.0.5=h5cf9203_0
- libltdl=2.4.3a=h5888daf_0
- liblzma=5.8.1=hb9d3cd8_0
- liblzma-devel=5.8.1=hb9d3cd8_0
- libmicrohttpd=0.9.77=h97afed2_0
- libnabo=asp3.5.0=h3fd9d12_1
- libnetcdf=4.9.2=nompi_h7e745eb_109
- libnghttp2=1.58.0=h47da74e_0
- libnsl=2.0.1=hd590300_0
- libntlm=1.8=hb9d3cd8_0
- libogg=1.3.5=h4ab18f5_0
- libopenblas=0.3.29=pthreads_h94d23a6_0
- libopencv=4.7.0=py311h8aafb54_6
- libopus=1.5.2=hd0c01bc_0
- libpciaccess=0.18=hd590300_0
- libpng=1.6.47=h943b412_0
- libpointmatcher=asp3.5.0=h2bc3f7f_1
- libpq=15.4=hfc447b1_2
- libprotobuf=3.21.12=hfc55251_2
- librsvg=2.56.3=h98fae49_0
- librttopo=1.1.0=hb58d41b_14
- libsanitizer=13.3.0=he8ea267_2
- libsndfile=1.2.2=hc60ed4a_1
- libspatialite=5.0.1=h15f6e67_28
- libsqlite=3.49.1=hee588c1_2
- libssh2=1.11.0=h0841786_0
- libstdcxx=14.2.0=h8f9b012_2
- libstdcxx-devel_linux-64=13.3.0=hc03c837_102
- libstdcxx-ng=14.2.0=h4852527_2
- libsystemd0=256.9=h2774228_0
- libtasn1=4.20.0=hb9d3cd8_0
- libtheora=1.1.1=h4ab18f5_1006
- libtiff=4.5.1=h8b53f26_1
- libtool=2.5.4=h5888daf_0
- libunistring=0.9.10=h7f98852_0
- libuuid=2.38.1=h0b41bf4_0
- libuv=1.50.0=hb9d3cd8_0
- libva=2.21.0=h4ab18f5_2
- libvorbis=1.3.7=h9c3ff4c_0
- libvpx=1.13.1=h59595ed_0
- libwebp=1.3.1=hbf2b3c1_0
- libwebp-base=1.3.1=hd590300_0
- libxcb=1.15=h0b41bf4_0
- libxcrypt=4.4.36=hd590300_1
- libxkbcommon=1.6.0=h5d7e998_0
- libxml2=2.11.5=h0d562d8_0
- libzip=1.10.1=h2629f0a_3
- libzlib=1.3.1=hb9d3cd8_2
- llvm-openmp=16.0.6=h4dfa4b3_0
- loguru=0.7.2=py311h38be061_2
- lz4-c=1.9.4=hcb278e6_0
- lzo=2.10=hd590300_1001
- markupsafe=3.0.2=py311h2dc5d0c_1
- mesa-libgl-cos7-x86_64=18.3.4=ha675448_1106
- mesa-libglapi-cos7-x86_64=18.3.4=ha675448_1106
- mesalib=23.2.1=h6b56f8e_0
- metis=5.1.0=hd0bcaf9_1007
- mpfr=4.2.1=h90cbb55_3
- mpg123=1.32.9=hc50e24c_0
- msgpack-python=1.1.0=py311hd18a35c_0
- multidict=6.4.3=py311h2dc5d0c_0
- multiview=asp_3.5.0=py311_2
- mysql=8.0.33=h27aab58_2
- mysql-client=8.0.33=h545f5f4_2
- mysql-common=8.0.33=hf1915f5_2
- mysql-connector-c=6.1.11=h659d440_1008
- mysql-devel=8.0.33=hf1915f5_2
- mysql-libs=8.0.33=hca2cd23_2
- mysql-server=8.0.33=ha473b58_2
- nanoflann=1.6.1=hff21bea_0
- ncurses=6.5=h2d0b736_3
- nettle=3.9.1=h7ab15ed_0
- networkx=3.4.2=pyh267e887_2
- nlohmann_json=3.11.3=he02047a_1
- nn=1.86.0=hd590300_2003
- nspr=4.36=h5888daf_0
- nss=3.110=h159eef7_0
- numpy=1.26.4=py311h64a7726_0
- openblas=0.3.29=pthreads_h6ec200e_0
- opencv=4.7.0=py311h38be061_6
- openexr=2.5.5=h2bc3f7f_0
- openh264=2.3.1=hcb278e6_2
- openjpeg=2.5.0=hfec8fc6_2
- openssl=3.1.8=h7b32b05_0
- p11-kit=0.24.1=hc5aa10d_0
- pandas=2.2.3=py311h7db5c69_3
- pango=1.50.14=heaa33ce_1
- parallel=20250422=ha770c72_0
- pbzip2=1.1.13=h1fcc475_2
- pcl=1.13.1=hd129add_1
- pcre2=10.40=hc3806b6_0
- pdal=2.6.0_asp3.5.0=py311h3fd9d12_1
- perl=5.32.1=7_hd590300_perl5
- pip=25.0.1=pyh8b19718_0
- pixman=0.44.2=h29eaf8c_0
- plio=1.6.0=pyh12aca89_0
- poppler=23.08.0=hd18248d_0
- poppler-data=0.4.12=hd8ed1ab_0
- postgresql=15.4=h8972f4a_2
- proj=9.2.1=ha643af7_0
- propcache=0.3.1=py311h2dc5d0c_0
- protobuf=4.21.12=py311hcafe171_0
- pthread-stubs=0.4=hb9d3cd8_1002
- pugixml=1.13=h59595ed_1
- pulseaudio-client=16.1=hb77b528_5
- pvl=1.3.2=pyhd8ed1ab_1
- py-opencv=4.7.0=py311h781c19f_6
- pyproj=3.6.1=py311ha169711_0
- python=3.11.6=hab00c5b_0_cpython
- python-dateutil=2.9.0.post0=pyhff2d567_1
- python-tzdata=2025.2=pyhd8ed1ab_0
- python_abi=3.11=7_cp311
- pytz=2025.2=pyhd8ed1ab_0
- pyyaml=6.0.2=py311h2dc5d0c_2
- qhull=2020.2=h434a139_5
- qt-main=5.15.8=h7fe3ca9_15
- qwt=6.2.0=h1a478b3_6
- rapidjson=1.1.0.post20240409=h3f2d84a_2
- rclone=1.69.1=hdac43c8_0
- readline=8.2=h8c095d6_2
- rhash=1.4.5=hb9d3cd8_0
- rocksdb=8.5.3=h699acb7_1
- s2n=1.3.54=h06160fa_0
- s2p-subset=asp3.5.0=h3fd9d12_0
- scipy=1.15.2=py311h8f841c2_0
- setuptools=79.0.0=pyhff2d567_0
- six=1.17.0=pyhd8ed1ab_0
- snappy=1.2.1=h8bd8927_1
- spiceypy=6.0.0=pyhd8ed1ab_2
- sqlalchemy=2.0.40=py311h9ecbd09_0
- sqlite=3.49.1=h9eae976_2
- stereo-pipeline=3.5.0=2
- suitesparse=5.10.1=h5a4f163_3
- superlu=5.2.2=h00795ac_0
- svt-av1=1.7.0=h59595ed_0
- sysroot_linux-64=2.17=h0157908_18
- tbb=2021.11.0=h00ab1b0_1
- tbb-devel=2021.11.0=h5ccd973_1
- tiledb=2.16.3=hf0b6e87_3
- tk=8.6.13=noxft_h4845f30_101
- tnt=126=0
- typing-extensions=4.13.2=h0e9735f_0
- typing_extensions=4.13.2=pyh29332c3_0
- tzcode=2025b=hb9d3cd8_0
- tzdata=2025b=h78e105d_0
- uriparser=0.9.8=hac33072_0
- usgscsm=2.0.2=h8e3e32c_3
- utfcpp=4.0.6=h005c6e1_0
- visionworkbench=asp3.5.0=0
- vlfeat=0.9.21=hd590300_1
- vtk=9.2.6=qt_py311h1234567_210
- vtk-base=9.2.6=qt_py311h1234567_210
- vtk-io-ffmpeg=9.2.6=qt_py311h1234567_210
- wheel=0.45.1=pyhd8ed1ab_1
- wslink=2.3.3=pyhd8ed1ab_0
- x264=1!164.3095=h166bdaf_2
- x265=3.5=h924138e_3
- xcb-util=0.4.0=hd590300_1
- xcb-util-image=0.4.0=h8ee46fc_1
- xcb-util-keysyms=0.4.0=h8ee46fc_1
- xcb-util-renderutil=0.3.9=hd590300_1
- xcb-util-wm=0.4.1=h8ee46fc_1
- xerces-c=3.2.4=h8d71039_2
- xkeyboard-config=2.42=h4ab18f5_0
- xorg-damageproto=1.2.1=hb9d3cd8_1003
- xorg-fixesproto=5.0=hb9d3cd8_1003
- xorg-glproto=1.4.17=hb9d3cd8_1003
- xorg-inputproto=2.3.2=hb9d3cd8_1003
- xorg-kbproto=1.0.7=hb9d3cd8_1003
- xorg-libice=1.1.2=hb9d3cd8_0
- xorg-libsm=1.2.6=he73a12e_0
- xorg-libx11=1.8.9=h8ee46fc_0
- xorg-libxau=1.0.12=hb9d3cd8_0
- xorg-libxdamage=1.1.5=h7f98852_1
- xorg-libxdmcp=1.1.5=hb9d3cd8_0
- xorg-libxext=1.3.4=h0b41bf4_2
- xorg-libxfixes=5.0.3=h7f98852_1004
- xorg-libxi=1.7.10=h4bc722e_1
- xorg-libxmu=1.1.3=h4ab18f5_1
- xorg-libxrandr=1.5.2=h7f98852_1
- xorg-libxrender=0.9.11=hd590300_0
- xorg-libxt=1.3.0=hd590300_1
- xorg-randrproto=1.5.0=hb9d3cd8_1002
- xorg-renderproto=0.11.1=hb9d3cd8_1003
- xorg-util-macros=1.20.2=hb9d3cd8_0
- xorg-xextproto=7.3.0=hb9d3cd8_1004
- xorg-xf86vidmodeproto=2.3.1=hb9d3cd8_1005
- xorg-xproto=7.0.31=hb9d3cd8_1008
- xz=5.8.1=hbcc6ac9_0
- xz-gpl-tools=5.8.1=hbcc6ac9_0
- xz-tools=5.8.1=hb9d3cd8_0
- yaml=0.2.5=h7f98852_2
- yarl=1.20.0=py311h2dc5d0c_0
- zlib=1.3.1=hb9d3cd8_2
- zstd=1.5.7=hb8e6e7a_2
================================================
FILE: conda/asp_3.5.0_osx_x64_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- https://repo.anaconda.com/pkgs/main
- https://repo.anaconda.com/pkgs/r
dependencies:
- affine=2.4.0=pyhd8ed1ab_1
- aiohappyeyeballs=2.6.1=pyhd8ed1ab_0
- aiohttp=3.11.18=py310h8e2f543_0
- aiosignal=1.3.2=pyhd8ed1ab_0
- ale=0.10.0=py310hf166250_3
- aom=3.7.1=h93d8f39_0
- armadillo=14.0.2=hae35012_0
- arpack=3.9.1=nompi_hdfe9103_102
- async-timeout=5.0.1=pyhd8ed1ab_1
- atk-1.0=2.38.0=h1d18e73_1
- attrs=25.3.0=pyh71513ae_0
- aws-c-auth=0.7.4=hbe6ead2_2
- aws-c-cal=0.6.2=hfc10710_2
- aws-c-common=0.9.3=h0dc2134_0
- aws-c-compression=0.2.17=hd41bdd4_3
- aws-c-event-stream=0.3.2=he52df9d_2
- aws-c-http=0.7.13=hb43ddd7_2
- aws-c-io=0.13.33=h2566903_0
- aws-c-mqtt=0.9.7=h6b10715_0
- aws-c-s3=0.3.17=h5800b94_4
- aws-c-sdkutils=0.1.12=hd41bdd4_2
- aws-checksums=0.1.17=hd41bdd4_2
- aws-crt-cpp=0.24.2=hf47b73b_2
- aws-sdk-cpp=1.10.57=h98cd3c2_24
- blas=2.131=openblas
- blas-devel=3.9.0=31_hbf4f893_openblas
- blosc=1.21.6=h7d75f6d_0
- boost=1.78.0=py310h92ebccd_5
- boost-cpp=1.78.0=hf5ba120_3
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=hfdf4475_7
- c-ares=1.34.5=hf13058a_0
- ca-certificates=2025.4.26=hbd8a1cb_0
- cached-property=1.5.2=hd8ed1ab_1
- cached_property=1.5.2=pyha770c72_1
- cairo=1.16.0=h09dd18c_1016
- ceres-solver=2.2.0=haa0d064_2
- certifi=2025.1.31=pyhd8ed1ab_0
- cfitsio=4.3.0=h66f91ea_0
- clang=16.0.6=default_h420b035_14
- clang-16=16.0.6=default_he1224e2_14
- clangxx=16.0.6=default_h2725d3a_14
- cmake=3.27.9=hc7ee4c4_0
- csm=3.0.3.3=h3718637_1
- cspice=67=h6e16a3a_8
- curl=8.4.0=h726d00d_0
- dav1d=1.2.1=h0dc2134_0
- double-conversion=3.3.1=h240833e_0
- eigen=3.4.0=h1c7c39f_0
- embree=3.13.0=hb7b477c_1
- expat=2.5.0=hf0c8a7f_1
- ffmpeg=6.1.0=gpl_h9a4e613_101
- fftw=3.3.10=nompi_h292e606_110
- fgr=asp3.5.0=1
- flann=1.9.1=hebd20f1_1013
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=h77eed37_3
- fontconfig=2.14.2=h5bb23bf_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freetype=2.13.3=h694c41f_1
- freexl=1.0.6=hb7f2c08_1
- fribidi=1.0.10=hbcb3906_0
- frozenlist=1.5.0=py310h8e2f543_1
- gdk-pixbuf=2.42.10=hff535ac_2
- geoid=asp3.5.0=6
- geos=3.12.0=he965462_0
- geotiff=1.7.1=h5cf5d3c_11
- gettext=0.23.1=hd385c8e_0
- gettext-tools=0.23.1=h27064b9_0
- gflags=2.2.2=hac325c4_1005
- giflib=5.2.2=h10d778d_0
- gl2ps=1.4.2=hd82a5f3_1
- glew=2.1.0=h046ec9c_2
- glib=2.78.1=hf4d7fad_0
- glib-tools=2.78.1=hf4d7fad_0
- glog=0.7.1=h2790a97_0
- gmp=6.3.0=hf036a51_2
- gnutls=3.7.9=h1951705_0
- graphite2=1.3.13=h73e2aa4_1003
- graphviz=8.1.0=hc7f41f9_0
- greenlet=3.2.1=py310h6954a95_0
- gsl=2.6=h71c5fe9_2
- gst-plugins-base=1.22.7=hd283e88_0
- gstreamer=1.22.7=h8954545_0
- gtk2=2.24.33=h7c1209e_2
- gts=0.7.6=h53e17e3_4
- h5py=3.9.0=nompi_py310h164cd36_101
- harfbuzz=7.3.0=h413ba03_0
- hdf4=4.2.15=h9804679_6
- hdf5=1.14.1=nompi_hedada53_100
- icu=72.1=h7336db1_0
- idna=3.10=pyhd8ed1ab_1
- ilmbase=2.5.5=h01edc0c_1
- inja=3.3.0=he49afe7_0
- isis=8.3.0=0
- jama=125=0
- jasper=4.0.0=h794afb9_1
- jinja2=3.1.6=pyhd8ed1ab_0
- json-c=0.17=h6253ea5_1
- jsoncpp=1.9.5=h940c156_1
- kakadu=1=0
- kalasiris=1.11.0=pyhd8ed1ab_1
- kealib=1.5.1=h7014c1b_4
- krb5=1.21.2=hb884880_0
- lame=3.100=hb7f2c08_1003
- lcms2=2.15=h2dcdeff_1
- lerc=4.0.0=hcca01a6_1
- libabseil=20230125.3=cxx17_h000cb23_0
- libaec=1.1.3=h73e2aa4_0
- libarchive=3.6.2=h0b5dc4a_1
- libasprintf=0.23.1=h27064b9_0
- libasprintf-devel=0.23.1=h27064b9_0
- libass=0.17.1=h66d2fa1_0
- libblas=3.9.0=31_h7f60823_openblas
- libcblas=3.9.0=31_hff6cab4_openblas
- libclang=15.0.7=default_h7151d67_5
- libclang-cpp16=16.0.6=default_he1224e2_14
- libclang13=15.0.7=default_h0edc4dd_5
- libcurl=8.4.0=h726d00d_0
- libcxx=20.1.3=hf95d169_0
- libcxx-devel=16.0.6=h8f8a49f_2
- libdeflate=1.18=hac1461d_0
- libedit=3.1.20250104=pl5321ha958ccf_0
- libelas=asp3.5.0=h01edc0c_0
- libev=4.33=h10d778d_2
- libexpat=2.5.0=hf0c8a7f_1
- libffi=3.4.6=h281671d_1
- libfreetype=2.13.3=h694c41f_1
- libfreetype6=2.13.3=h40dfd5c_1
- libgd=2.3.3=h3d994df_6
- libgdal=3.7.1=h4103fcf_6
- libgettextpo=0.23.1=h27064b9_0
- libgettextpo-devel=0.23.1=h27064b9_0
- libgfortran=14.2.0=hef36b68_105
- libgfortran5=14.2.0=h58528f3_105
- libglib=2.78.1=h6d9ecee_0
- libhwloc=2.9.3=default_h24e0189_1009
- libiconv=1.18=h4b5e92a_1
- libidn2=2.3.8=he8ff88c_0
- libintl=0.23.1=h27064b9_0
- libintl-devel=0.23.1=h27064b9_0
- libjemalloc=5.3.0=hac325c4_1
- libjpeg-turbo=2.1.5.1=h0dc2134_1
- libkml=1.3.0=hab3ca0e_1016
- liblapack=3.9.0=31_h236ab99_openblas
- liblapacke=3.9.0=31_h85686d2_openblas
- libllvm15=15.0.7=he4b1e75_3
- libllvm16=16.0.6=he4b1e75_2
- libltdl=2.4.3a=h240833e_0
- liblzma=5.8.1=hd471939_0
- liblzma-devel=5.8.1=hd471939_0
- libnabo=asp3.5.0=h01edc0c_1
- libnetcdf=4.9.2=nompi_hb79a6a3_109
- libnghttp2=1.58.0=h64cf6d3_0
- libogg=1.3.5=hfdf4475_0
- libopenblas=0.3.29=openmp_hbf64a52_0
- libopencv=4.7.0=py310h12e1fec_6
- libopus=1.5.2=he3325bb_0
- libpng=1.6.47=h3c4a55f_0
- libpointmatcher=asp3.5.0=ha5a8b8e_1
- libpq=15.4=h3df487d_2
- libprotobuf=4.23.3=h5feb325_1
- librsvg=2.56.3=hec3db73_0
- librttopo=1.1.0=h23f359d_14
- libspatialite=5.0.1=h8e1b34b_28
- libsqlite=3.49.1=hdb6dae5_2
- libssh2=1.11.0=hd019ec5_0
- libtasn1=4.20.0=h6e16a3a_0
- libtheora=1.1.1=hfdf4475_1006
- libtiff=4.5.1=hf955e92_1
- libtool=2.5.4=h240833e_0
- libunistring=0.9.10=h0d85af4_0
- libuv=1.50.0=h4cb831e_0
- libvorbis=1.3.7=h046ec9c_0
- libvpx=1.13.1=he965462_0
- libwebp=1.3.1=hc961f54_0
- libwebp-base=1.3.1=h0dc2134_0
- libxcb=1.15=hb7f2c08_0
- libxml2=2.11.5=hd95e348_0
- libzip=1.10.1=hc158999_3
- libzlib=1.3.1=hd23fc13_2
- llvm-openmp=20.1.3=ha54dae1_0
- loguru=0.7.2=py310h2ec42d9_2
- lz4-c=1.9.4=hf0c8a7f_0
- lzo=2.10=h10d778d_1001
- markupsafe=3.0.2=py310h8e2f543_1
- mesalib=24.0.2=hbd9e708_1
- metis=5.1.0=h3023b02_1007
- mpfr=4.2.1=haed47dc_3
- msgpack-python=1.1.0=py310hfa8da69_0
- multidict=6.4.3=py310hf0d9222_0
- multiview=asp_3.5.0=py310_0
- mysql=5.7.20=h0a44026_1001
- mysql-common=8.0.33=h1d20c9b_6
- mysql-connector-c=6.1.11=h89ed7f3_1008
- mysql-libs=8.0.33=hed35180_6
- nanoflann=1.6.1=h429ed02_0
- ncurses=6.5=h0622a9a_3
- nettle=3.9.1=h8e11ae5_0
- networkx=3.4.2=pyh267e887_2
- nlohmann_json=3.11.3=hf036a51_1
- nn=1.86.0=h0dc2134_2003
- nspr=4.36=h97d8b74_0
- nss=3.110=h32a8879_0
- numpy=1.26.4=py310h4bfa8fc_0
- openblas=0.3.29=openmp_h30af337_0
- opencv=4.7.0=py310h2ec42d9_6
- openexr=2.5.5=ha5a8b8e_0
- openh264=2.3.1=hf0c8a7f_2
- openjpeg=2.5.0=h13ac156_2
- openssl=3.1.8=hc426f3f_0
- p11-kit=0.24.1=h65f8906_0
- pandas=2.2.3=py310h96a9d13_3
- pango=1.50.14=hbce5e75_1
- parallel=20250422=h694c41f_0
- pbzip2=1.1.13=h92b232e_2
- pcl=1.13.1=h3e6cc95_1
- pcre2=10.40=h1c4e4bc_0
- pdal=2.6.0_asp3.5.0=py310hfc30dc6_0
- perl=5.32.1=7_h10d778d_perl5
- pip=25.0.1=pyh8b19718_0
- pixman=0.44.2=h1fd1274_0
- plio=1.6.0=pyh12aca89_0
- poppler=23.08.0=he041c3a_0
- poppler-data=0.4.12=hd8ed1ab_0
- postgresql=15.4=hc940a54_2
- proj=9.2.1=hc8d59c9_0
- propcache=0.3.1=py310h8e2f543_0
- protobuf=4.23.3=py310h4e8a696_0
- pthread-stubs=0.4=h00291cd_1002
- pugixml=1.13=he965462_1
- pvl=1.3.2=pyhd8ed1ab_1
- py-opencv=4.7.0=py310ha188af9_6
- pyproj=3.6.1=py310hfcc5317_0
- python=3.10.13=h00d2728_0_cpython
- python-dateutil=2.9.0.post0=pyhff2d567_1
- python-tzdata=2025.2=pyhd8ed1ab_0
- python_abi=3.10=7_cp310
- pytz=2025.2=pyhd8ed1ab_0
- pyyaml=6.0.2=py310h8e2f543_2
- qhull=2020.2=h3c5361c_5
- qt-main=5.15.8=heb9da5c_15
- qwt=6.2.0=hed3eaa1_6
- rapidjson=1.1.0.post20240409=h92383a6_2
- rclone=1.69.1=hdbe1cec_0
- readline=8.2=h7cca4af_2
- rhash=1.4.5=ha44c9a9_0
- rocksdb=8.5.3=hcf0db61_1
- s2p-subset=asp3.5.0=h01edc0c_0
- scipy=1.15.2=py310hef62574_0
- setuptools=79.0.0=pyhff2d567_0
- six=1.17.0=pyhd8ed1ab_0
- snappy=1.2.1=haf3c120_1
- spiceypy=6.0.0=pyhd8ed1ab_2
- sqlalchemy=2.0.40=py310hbb8c376_0
- sqlite=3.49.1=h2e4c9dc_2
- stereo-pipeline=3.5.0=2
- suitesparse=5.10.1=h4bf45ed_3
- superlu=5.2.2=h1f0f902_0
- svt-av1=1.7.0=he965462_0
- tbb=2021.11.0=h7728843_1
- tbb-devel=2021.11.0=ha56d2d7_1
- tiledb=2.16.3=h9b026fb_3
- tk=8.6.13=h1abcd95_1
- tnt=126=0
- typing-extensions=4.13.2=h0e9735f_0
- typing_extensions=4.13.2=pyh29332c3_0
- tzcode=2025b=h6e16a3a_0
- tzdata=2025b=h78e105d_0
- uriparser=0.9.8=h6aefe2f_0
- usgscsm=2.0.2=h9275861_3
- utfcpp=4.0.6=h93fb1c9_0
- visionworkbench=asp3.5.0=2
- vlfeat=0.9.21=h0dc2134_1
- vtk=9.2.6=qt_py310h1234567_210
- vtk-base=9.2.6=qt_py310h1234567_210
- vtk-io-ffmpeg=9.2.6=qt_py310h1234567_210
- wheel=0.45.1=pyhd8ed1ab_1
- wslink=2.3.3=pyhd8ed1ab_0
- x264=1!164.3095=h775f41a_2
- x265=3.5=hbb4e6a2_3
- xerces-c=3.2.4=h90c7484_2
- xorg-damageproto=1.2.1=h00291cd_1003
- xorg-fixesproto=5.0=h00291cd_1003
- xorg-glproto=1.4.17=h00291cd_1003
- xorg-inputproto=2.3.2=h00291cd_1003
- xorg-kbproto=1.0.7=h00291cd_1003
- xorg-libice=1.1.2=h6e16a3a_0
- xorg-libsm=1.2.6=h6e16a3a_0
- xorg-libx11=1.8.9=hc955faf_0
- xorg-libxau=1.0.12=h6e16a3a_0
- xorg-libxdamage=1.1.5=h0d85af4_1
- xorg-libxdmcp=1.1.5=h00291cd_0
- xorg-libxext=1.3.4=hb7f2c08_2
- xorg-libxfixes=5.0.3=h0d85af4_1004
- xorg-libxi=1.7.10=hfdf4475_1
- xorg-libxrandr=1.5.2=h0d85af4_1
- xorg-libxrender=0.9.11=h0dc2134_0
- xorg-randrproto=1.5.0=h00291cd_1002
- xorg-renderproto=0.11.1=h00291cd_1003
- xorg-util-macros=1.20.2=h6e16a3a_0
- xorg-xextproto=7.3.0=h00291cd_1004
- xorg-xf86vidmodeproto=2.3.1=h00291cd_1005
- xorg-xproto=7.0.31=h00291cd_1008
- xz=5.8.1=h357f2ed_0
- xz-gpl-tools=5.8.1=h357f2ed_0
- xz-tools=5.8.1=hd471939_0
- yaml=0.2.5=h0d85af4_2
- yarl=1.20.0=py310h8e2f543_0
- zlib=1.3.1=hd23fc13_2
- zstd=1.5.7=h8210216_2
================================================
FILE: conda/asp_deps_2.7.0_linux_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=0_gnu
- _sysroot_linux-64_curr_repodata_hack=3=ha9d2b57_10
- alabaster=0.7.12=py_0
- ale=0.7.2=py36h9e03d57_1
- anaconda-client=1.7.2=py_0
- armadillo=9.200.7=hf4e8f56_0
- arpack=3.7.0=hc6cf775_1
- asn1crypto=1.4.0=pyh9f0ad1d_0
- attrs=20.2.0=pyh9f0ad1d_0
- babel=2.8.0=py_0
- beautifulsoup4=4.9.2=py_0
- binutils_impl_linux-64=2.35=h18a2f87_9
- binutils_linux-64=2.35=heab0d09_28
- blas=1.1=openblas
- boost=1.68.0=py36h8619c78_1001
- boost-cpp=1.68.0=h11c811c_1000
- brotlipy=0.7.0=py36h8c4c3a4_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h516909a_2
- ca-certificates=2020.6.20=hecda079_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=h0948850_10
- certifi=2020.6.20=py36h9f0ad1d_0
- cffi=1.14.3=py36h0ff685e_0
- chardet=3.0.4=py36h9f0ad1d_1007
- chrpath=0.16=h14c3975_1001
- click=7.1.2=pyh9f0ad1d_0
- clyent=1.2.2=py_1
- cmake=3.16.0=hf94ab9c_0
- conda=4.8.3=py36h9f0ad1d_1
- conda-build=3.19.2=py36h9f0ad1d_2
- conda-package-handling=1.7.0=py36h8c4c3a4_5
- conda-verify=3.1.1=py36h9f0ad1d_1001
- cryptography=2.5=py36hb7f436b_1
- csm=v3.0.3.1=h6bb024c_1001
- cspice=66=h516909a_1009
- curl=7.64.0=h646f8bb_0
- dbus=1.13.0=h4e0c4b3_1000
- decorator=4.4.2=py_0
- docutils=0.16=py36h9f0ad1d_1
- eigen=3.3.7=hc9558a2_1001
- embree=2.16.0=0
- expat=2.2.9=he1b5a44_2
- ffmpeg=3.4.1=0
- fgr=e78ce15=hf484d3e_0
- filelock=3.0.12=pyh9f0ad1d_0
- flann=1.9.1=0
- fontconfig=2.12.1=4
- freetype=2.7=1
- future=0.18.2=py36h9f0ad1d_1
- gcc_impl_linux-64=7.5.0=hdb87b24_16
- gcc_linux-64=7.5.0=hf34d7eb_28
- gdal=2.0.2=hf484d3e_0
- geoid=1.0=1
- geos=3.7.3=he1b5a44_0
- geotiff=1.4.2=hb54a4aa_1
- gettext=0.19.8.1=hc5be6a0_1002
- gflags=2.2.2=he1b5a44_1002
- gfortran_impl_linux-64=7.5.0=h1104b78_16
- gfortran_linux-64=7.5.0=ha781d05_28
- giflib=5.2.1=h516909a_2
- git=2.20.1=pl526hc122a05_1001
- glib=2.51.4=0
- glob2=0.7=py_0
- glog=0.4.0=h49b9bf7_3
- gmm=5.0=0
- gmp=6.2.0=he1b5a44_2
- gsl=2.6=h294904e_0
- gst-plugins-base=1.8.0=0
- gstreamer=1.8.0=2
- gxx_impl_linux-64=7.5.0=h1104b78_16
- gxx_linux-64=7.5.0=ha781d05_28
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=hf484d3e_1000
- idna=2.10=pyh9f0ad1d_0
- ilmbase=2.5.2=h8b12597_0
- imagemagick=6.8.6_10=hf484d3e_0
- imagesize=1.2.0=py_0
- importlib-metadata=2.0.0=py36h9f0ad1d_0
- importlib_metadata=2.0.0=0
- ipython_genutils=0.2.0=py_1
- isis=4.1.0=0
- isis-headers=4.1.0=1000
- jama=125=0
- jasper=1.900.1=4
- jinja2=2.11.2=pyh9f0ad1d_0
- jpeg=9b=h024ee3a_2
- jsonschema=3.2.0=py36h9f0ad1d_1
- jupyter_core=4.6.3=py36h9f0ad1d_1
- kakadu=1=0
- kernel-headers_linux-64=3.10.0=h77966d4_10
- krb5=1.16.3=hc83ff2d_1000
- laszip=2.1.0=hf484d3e_1
- latexcodec=2.0.1=pyh9f0ad1d_0
- ld_impl_linux-64=2.35=h769bd43_9
- libarchive=3.3.3=ha149a29_1000
- libblas=3.8.0=11_openblas
- libcblas=3.8.0=11_openblas
- libcurl=7.64.0=h01ee5af_0
- libedit=3.1.20191231=h46ee950_0
- libffi=3.2.1=he1b5a44_1007
- libgcc=7.2.0=h69d50b8_2
- libgcc-devel_linux-64=7.5.0=h42c25f5_16
- libgcc-ng=9.2.0=h24d8f2e_2
- libgfortran=3.0.0=1
- libgfortran-ng=7.5.0
- libglvnd-cos7-x86_64=1.0.1=h9d98e8f_1104
- libglvnd-glx-cos7-x86_64=1.0.1=h9d98e8f_1104
- libgomp=9.2.0=h24d8f2e_2
- libiconv=1.15=h516909a_1006
- liblapack=3.8.0=11_openblas
- liblas=1.8.1=hf484d3e_1000
- liblief=0.10.1=he1b5a44_1
- libnabo=2df86e0=hf484d3e_0
- libopenblas=0.3.6=h5a2b251_2
- libpng=1.6.37=hed695b0_1
- libpointmatcher=bcf4b04=hf484d3e_0
- libpq=10.6=h13b8bad_1000
- libprotobuf=3.9.1=h8b12597_0
- libssh2=1.8.0=h1ad7b7a_1003
- libstdcxx-devel_linux-64=7.5.0=h4084dd6_16
- libstdcxx-ng=9.2.0=hdf63c60_2
- libtiff=4.0.9=he6b73bb_1
- libuuid=2.32.1=h14c3975_1000
- libuv=1.40.0=h516909a_0
- libwebp=0.5.2=7
- libxcb=1.13=h14c3975_1002
- libxml2=2.9.9=h13577e0_2
- llvm-openmp=8.0.1=hc9558a2_0
- markupsafe=1.1.1=py36h8c4c3a4_1
- mesalib=18.3.1=h590aaf7_0
- metis=5.1.0=he1b5a44_1005
- mysql=5.7.20=hf484d3e_1001
- mysql-connector-c=6.1.11=hab6429c_1002
- nanoflann=1.3.1=0
- nbformat=5.0.7=py_0
- ncurses=6.1=hf484d3e_1002
- networkx=2.4=py_1
- nlohmann_json=3.7.3=he1b5a44_1
- nn=1.86.0=h14c3975_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.2=he513fc3_0
- openjpeg=2.1.0=6
- openmp=8.0.1=0
- openssl=1.0.2u=h516909a_0
- oset=0.1.3=py_1
- packaging=20.4=pyh9f0ad1d_0
- parallel=20200522=0
- patchelf=0.11=he1b5a44_0
- pbzip2=1.1.13=0
- pcl=1.9.1=h482114b_1002
- pcre=8.44=he1b5a44_0
- perl=5.26.2=h516909a_1006
- pip=20.1.1=py_1
- pixman=0.34.0=h14c3975_1003
- pkginfo=1.5.0.1=py_0
- postgresql=10.6=h66cca7a_1000
- proj4=4.9.3=h516909a_9
- protobuf=3.9.1=py36he1b5a44_0
- psutil=5.7.2=py36h8c4c3a4_0
- pthread-stubs=0.4=h14c3975_1001
- pvl=0.3.0=py_1
- py-lief=0.10.1=py36h831f99a_1
- pybtex=0.22.2=py36h9f0ad1d_1
- pybtex-docutils=0.2.2=py36h9f0ad1d_1
- pycosat=0.6.3=py36h8c4c3a4_1004
- pycparser=2.20=pyh9f0ad1d_2
- pygments=2.7.1=py_0
- pyopenssl=19.0.0=py36_0
- pyparsing=2.4.7=pyh9f0ad1d_0
- pyrsistent=0.17.3=py36h8c4c3a4_0
- pysocks=1.7.1=py36h9f0ad1d_1
- python=3.6.7=hd21baee_1002
- python-dateutil=2.8.1=py_0
- python-libarchive-c=2.9=py36h9f0ad1d_1
- python_abi=3.6=1_cp36m
- pytz=2020.1=pyh9f0ad1d_0
- pyyaml=5.3.1=py36h8c4c3a4_0
- qhull=7.2.0=0
- qt=5.9.6=0
- qwt=6.1.3=0
- readline=7.0=hf8c457e_1001
- requests=2.24.0=pyh9f0ad1d_0
- rhash=1.3.6=h14c3975_1001
- ripgrep=12.1.1=h516909a_0
- ruamel_yaml=0.15.80=py36h8c4c3a4_1002
- scipy=1.2.1=py36h09a28d5_1
- setuptools=47.3.1=py36h9f0ad1d_0
- six=1.15.0=pyh9f0ad1d_0
- snowballstemmer=2.0.0=py_0
- soupsieve=2.0.1=py_1
- sphinx=3.1.1=py_0
- sphinxcontrib-applehelp=1.0.2=py_0
- sphinxcontrib-bibtex=1.0.0=py_0
- sphinxcontrib-devhelp=1.0.2=py_0
- sphinxcontrib-htmlhelp=1.0.3=py_0
- sphinxcontrib-jsmath=1.0.1=py_0
- sphinxcontrib-qthelp=1.0.3=py_0
- sphinxcontrib-serializinghtml=1.1.4=py_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h8b20d00_0
- suitesparse=5.7.2=h717dc36_0
- superlu=5.2.1=hfe2efc7_1207
- sysroot_linux-64=2.17=h77966d4_10
- tbb=2020.1=hc9558a2_0
- theia=f5d93f5=hf484d3e_1001
- tk=8.6.10=hed695b0_0
- tnt=126=0
- tqdm=4.50.0=pyh9f0ad1d_0
- traitlets=4.3.3=py36h9f0ad1d_1
- tzcode=2020a=h516909a_0
- urllib3=1.25.10=py_0
- usgscsm=a53f9cf=h6bb024c_0
- wheel=0.34.2=py_1
- x264=20131218=0
- xerces-c=3.1.4=0
- xorg-fixesproto=5.0=h14c3975_1002
- xorg-inputproto=2.3.2=h14c3975_1002
- xorg-kbproto=1.0.7=h14c3975_1002
- xorg-libice=1.0.10=h516909a_0
- xorg-libsm=1.2.3=h84519dc_1000
- xorg-libx11=1.6.9=h516909a_0
- xorg-libxau=1.0.9=h14c3975_0
- xorg-libxdmcp=1.1.3=h516909a_0
- xorg-libxext=1.3.4=h516909a_0
- xorg-libxfixes=5.0.3=h516909a_1004
- xorg-libxi=1.7.10=h516909a_0
- xorg-libxrender=0.9.10=h516909a_1002
- xorg-renderproto=0.11.1=h14c3975_1002
- xorg-xextproto=7.3.0=h14c3975_1002
- xorg-xproto=7.0.31=h14c3975_1007
- xz=5.2.5=h516909a_0
- yaml=0.2.5=h516909a_0
- zipp=3.2.0=py_0
- zlib=1.2.11=h516909a_1006
================================================
FILE: conda/asp_deps_2.7.0_osx_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- alabaster=0.7.12=py_0
- ale=0.7.2=py36h855b5bd_1
- anaconda-client=1.7.2=py_0
- armadillo=9.200.4=hcb88e47_1205
- arpack=3.6.3=h06d5271_1005
- asn1crypto=1.3.0=py36h9f0ad1d_1
- attrs=19.3.0=py_0
- babel=2.8.0=py_0
- beautifulsoup4=4.9.1=py36h9f0ad1d_0
- blas=1.1=openblas
- boost=1.68.0=py36h9888f84_1001
- boost-cpp=1.68.0=h6f8c590_1000
- brotlipy=0.7.0=py36h37b9a7d_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h0b31af3_2
- ca-certificates=2020.6.20=hecda079_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=hb9d6bad_10
- certifi=2020.6.20=py36h9f0ad1d_0
- cffi=1.14.0=py36h356ff06_0
- chardet=3.0.4=py36h9f0ad1d_1006
- click=7.1.2=pyh9f0ad1d_0
- clyent=1.2.2=py_1
- cmake=3.14.5=hdd2e4aa_0
- conda=4.8.3=py36h9f0ad1d_1
- conda-build=3.19.2=py36h9f0ad1d_2
- conda-package-handling=1.6.0=py36h37b9a7d_2
- conda-verify=3.1.1=py36h9f0ad1d_1001
- cryptography=1.8.1=py36_0
- csm=v3.0.3.1=1001
- cspice=66=h0b31af3_1007
- curl=7.64.0=heae2a1f_0
- decorator=4.4.2=py_0
- docutils=0.16=py36h9f0ad1d_1
- eigen=3.3.7=ha1b3eb9_1001
- embree=2.16.0=h6834224_0
- expat=2.2.9=h4a8c4bd_2
- ffmpeg=3.4.1=0
- fgr=e78ce15=h0a44026_0
- filelock=3.0.12=pyh9f0ad1d_0
- flann=1.9.1=0
- fontconfig=2.12.1=4
- freetype=2.7=1
- future=0.18.2=py36h9f0ad1d_1
- gdal=2.0.2=h0a44026_0
- geoid=1.0=1
- geos=3.7.3=h4a8c4bd_0
- geotiff=1.4.2=hb54a4aa_1
- gettext=0.19.8.1=h46ab8bc_1002
- gflags=2.2.2=h4a8c4bd_1002
- giflib=5.2.1=h0b31af3_2
- git=2.11.1=0
- glib=2.51.4=0
- glob2=0.7=py_0
- glog=0.4.0=h700f914_3
- gmm=5.0=h6aef312_0
- gmp=6.2.0=h4a8c4bd_2
- gsl=2.6=ha2d443c_0
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=h0a44026_1000
- idna=2.9=py_1
- ilmbase=2.5.2=hd174df1_0
- imagemagick=6.8.6_10=h0a44026_0
- imagesize=1.2.0=py_0
- importlib-metadata=1.6.1=py36h9f0ad1d_0
- importlib_metadata=1.6.1=0
- ipython_genutils=0.2.0=py_1
- isis=4.1.0=0
- isis-headers=4.1.0=1000
- jama=125=0
- jasper=1.900.1=4
- jinja2=2.11.2=pyh9f0ad1d_0
- jpeg=9b=he5867d9_2
- jsonschema=3.2.0=py36h9f0ad1d_1
- jupyter_core=4.6.3=py36h9f0ad1d_1
- kakadu=1=0
- krb5=1.16.3=h24a3359_1000
- laszip=2.1.0=h0a44026_1
- latexcodec=2.0.1=pyh9f0ad1d_0
- libarchive=3.3.3=hb60c381_1000
- libblas=3.8.0=11_openblas
- libcblas=3.8.0=11_openblas
- libcurl=7.64.0=h76de61e_0
- libcxx=10.0.0=1
- libedit=3.1.20191231=hed1e85f_0
- libffi=3.2.1=h4a8c4bd_1007
- libgcc=4.8.5=hdbeacc1_10
- libgfortran=3.0.1=0
- libiconv=1.15=h0b31af3_1006
- liblapack=3.8.0=11_openblas
- liblas=1.8.1=h0a44026_1000
- liblief=0.9.0=h3e78482_1
- libnabo=2df86e0=h0a44026_0
- libopenblas=0.3.6=hdc02c5d_2
- libpng=1.6.37=hbbe82c9_1
- libpointmatcher=bcf4b04=h0a44026_0
- libpq=10.6=hbe1e24e_1000
- libprotobuf=3.9.1=hfbae3c0_0
- libssh2=1.8.0=hf30b1f0_1003
- libtiff=4.0.9=he6b73bb_1
- libuv=1.38.0=h0b31af3_0
- libwebp=0.5.2=7
- libxcb=1.13=h1de35cc_1002
- libxml2=2.9.9=hd80cff7_2
- llvm-openmp=10.0.0=h28b9765_0
- markupsafe=1.1.1=py36h37b9a7d_1
- mesalib=18.0.0=hb6cfc13_1
- metis=5.1.0=h4a8c4bd_1005
- mysql=5.7.20=h0a44026_1001
- mysql-connector-c=6.1.11=had4e77e_1002
- nanoflann=1.3.1=0
- nbformat=5.0.6=py_0
- ncurses=6.1=h0a44026_1002
- networkx=2.4=py_1
- nlohmann_json=3.7.3=h4a8c4bd_1
- nn=1.86.0=h1de35cc_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.2=h7475705_0
- openjpeg=2.1.0=6
- openssl=1.0.2u=h0b31af3_0
- oset=0.1.3=py_1
- packaging=20.4=pyh9f0ad1d_0
- parallel=20200522=0
- pbzip2=1.1.13=h9d27c22_1
- pcl=1.9.1=hdd77166_1002
- pcre=8.44=h4a8c4bd_0
- perl=5.26.2=haec8ef5_1006
- pip=20.1.1=py36_1
- pixman=0.34.0=h1de35cc_1003
- pkginfo=1.5.0.1=py_0
- postgresql=10.6=ha1bbaa7_1000
- proj4=4.9.3=h01d97ff_9
- protobuf=3.9.1=py36h6de7cb9_0
- psutil=5.7.0=py36h37b9a7d_1
- pthread-stubs=0.4=h1de35cc_1001
- pvl=0.3.0=py_1
- py-lief=0.9.0=py36h0ceac7d_1
- pybtex=0.22.2=py36h9f0ad1d_1
- pybtex-docutils=0.2.2=py36h9f0ad1d_1
- pycosat=0.6.3=py36h37b9a7d_1004
- pycparser=2.20=py_0
- pygments=2.6.1=py_0
- pyopenssl=17.0.0=py36_0
- pyparsing=2.4.7=pyh9f0ad1d_0
- pyrsistent=0.16.0=py36h37b9a7d_0
- pysocks=1.7.1=py36h9f0ad1d_1
- python=3.6.7=h4a56312_1002
- python-dateutil=2.8.1=py_0
- python-libarchive-c=2.9=py36_0
- python_abi=3.6=1_cp36m
- pytz=2020.1=pyh9f0ad1d_0
- pyyaml=5.3.1=py36h37b9a7d_0
- qhull=7.2.0=0
- qt=5.9.6=0
- qwt=6.1.3=0
- readline=7.0=hcfe32e1_1001
- requests=2.24.0=pyh9f0ad1d_0
- rhash=1.3.6=h1de35cc_1001
- ripgrep=12.1.1=h0b31af3_0
- ruamel_yaml=0.15.80=py36h37b9a7d_1001
- scipy=1.2.1=py36hbd7caa9_1
- setuptools=47.3.1=py36_0
- six=1.15.0=pyh9f0ad1d_0
- snowballstemmer=2.0.0=py_0
- soupsieve=2.0.1=py36h9f0ad1d_0
- sphinx=3.1.1=py_0
- sphinxcontrib-applehelp=1.0.2=py_0
- sphinxcontrib-bibtex=1.0.0=py_0
- sphinxcontrib-devhelp=1.0.2=py_0
- sphinxcontrib-htmlhelp=1.0.3=py_0
- sphinxcontrib-jsmath=1.0.1=py_0
- sphinxcontrib-qthelp=1.0.3=py_0
- sphinxcontrib-serializinghtml=1.1.4=py_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h9721f7c_0
- suitesparse=5.7.2=h0e59142_0
- superlu=5.2.1=hbced767_1205
- tbb=2019.9=ha1b3eb9_1
- theia=f5d93f5=h0a44026_1001
- tk=8.6.10=hb0a8c7a_0
- tnt=126=0
- tqdm=4.46.1=pyh9f0ad1d_0
- traitlets=4.3.3=py36h9f0ad1d_1
- tzcode=2020a=h0b31af3_0
- urllib3=1.25.9=py_0
- usgscsm=a53f9cf=h04f5b5a_0
- wget=1.18=0
- wheel=0.34.2=py36_0
- x264=20131218=0
- xerces-c=3.1.4=h10f7eb2_0
- xorg-fixesproto=5.0=h1de35cc_1002
- xorg-inputproto=2.3.2=h1de35cc_1002
- xorg-kbproto=1.0.7=h1de35cc_1002
- xorg-libice=1.0.10=h01d97ff_0
- xorg-libsm=1.2.3=h01d97ff_1000
- xorg-libx11=1.6.9=h0b31af3_0
- xorg-libxau=1.0.9=h1de35cc_0
- xorg-libxdmcp=1.1.3=h01d97ff_0
- xorg-libxext=1.3.4=h01d97ff_0
- xorg-libxfixes=5.0.3=h01d97ff_1004
- xorg-libxi=1.7.10=h01d97ff_0
- xorg-xextproto=7.3.0=h1de35cc_1002
- xorg-xproto=7.0.31=h1de35cc_1007
- xz=5.2.5=h1de35cc_0
- yaml=0.2.5=h0b31af3_0
- zipp=3.1.0=py_0
- zlib=1.2.11=h1de35cc_3
================================================
FILE: conda/asp_deps_3.0.0_linux_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=1_gnu
- _sysroot_linux-64_curr_repodata_hack=3=h5bd9786_12
- ale=0.8.5=py36h605e78d_3
- armadillo=9.900.5=h7c03176_0
- arpack=3.7.0=hc6cf775_2
- binutils_impl_linux-64=2.35.1=h27ae35d_9
- binutils_linux-64=2.35=h67ddf6f_30
- blas=1.1=openblas
- boost=1.68.0=py36h8619c78_1001
- boost-cpp=1.68.0=h11c811c_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h7f98852_4
- ca-certificates=2021.5.30=ha878542_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=h0948850_10
- certifi=2021.5.30=py36h5fab9bb_0
- chrpath=0.16=h7f98852_1002
- cmake=3.16.0=hf94ab9c_0
- csm=3.0.3.3=hc9558a2_0
- cspice=66=h7f98852_1014
- curl=7.64.0=h646f8bb_0
- dbus=1.13.0=h4e0c4b3_1000
- decorator=5.0.9=pyhd8ed1ab_0
- eigen=3.3.7=hc9558a2_1001
- embree=2.16.0=0
- expat=2.4.1=h9c3ff4c_0
- ffmpeg=3.4.1=0
- fftw=3.3.8=nompi_hfc0cae8_1114
- fgr=isis5.0.1
- flann=1.9.1=0
- fontconfig=2.12.1=4
- freetype=2.7=1
- gcc_impl_linux-64=7.5.0=habd7529_19
- gcc_linux-64=7.5.0=h47867f9_30
- geoid=1.0=1
- geos=3.7.3=he1b5a44_0
- geotiff=1.4.3=hb6868eb_1001
- gettext=0.19.8.1=hf34092f_1004
- gflags=2.2.2=he1b5a44_1004
- gfortran_impl_linux-64=7.5.0=h56cb351_19
- gfortran_linux-64=7.5.0=h78c8a43_30
- giflib=5.2.1=h36c2ea0_2
- git=2.14.2=2
- glib=2.51.4=0
- glog=0.4.0=h49b9bf7_3
- gmm=5.0=0
- gmp=6.2.1=h58526e2_0
- gsl=2.7=he838d99_0
- gst-plugins-base=1.8.0=0
- gstreamer=1.8.0=2
- gxx_impl_linux-64=7.5.0=hd0bb8aa_19
- gxx_linux-64=7.5.0=h555fc39_30
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=hf484d3e_1000
- ilmbase=2.5.5=h780b84a_0
- imagemagick=isis5.0.1
- inja=3.3.0=h9c3ff4c_0
- isis=5.0.1=0
- jama=125=0
- jasper=1.900.1=h07fcdf6_1006
- jpeg=9d=h36c2ea0_0
- kakadu=1=0
- kernel-headers_linux-64=3.10.0=h4a8ded7_12
- krb5=1.16.3=hc83ff2d_1000
- laszip=2.1.0=hf484d3e_1
- ld_impl_linux-64=2.35.1=h7274673_9
- libblas=3.8.0=17_openblas
- libcblas=3.8.0=17_openblas
- libcurl=7.64.0=h01ee5af_0
- libedit=3.1.20191231=he28a2e2_2
- libelas=isis5.0.1
- libffi=3.2.1=he1b5a44_1007
- libgcc=7.2.0=h69d50b8_2
- libgcc-devel_linux-64=7.5.0=hda03d7c_19
- libgcc-ng=11.1.0=hc902ee8_8
- libgdal=2.4.1_isis5.0.1=h3fd9d12_0
- libgfortran=3.0.0=1
- libgfortran-ng=7.5.0=h14aa051_19
- libgfortran4=7.5.0=h14aa051_19
- libglvnd-cos7-x86_64=1.0.1=h9b0a68f_1105
- libgomp=11.1.0=hc902ee8_8
- libiconv=1.15=h516909a_1006
- liblapack=3.8.0=17_openblas
- liblas=isis5.0.1
- libnabo=isis5.0.1
- libopenblas=0.3.10=h5a2b251_0
- libpng=1.6.37=h21135ba_2
- libpointmatcher=isis5.0.1
- libpq=10.6=h13b8bad_1000
- libprotobuf=3.9.2=h8b12597_0
- libssh2=1.8.0=h1ad7b7a_1003
- libstdcxx-devel_linux-64=7.5.0=hb016644_19
- libstdcxx-ng=11.1.0=h56837e0_8
- libtiff=4.0.10=hc3755c2_1005
- libuuid=2.32.1=h7f98852_1000
- libuv=1.42.0=h7f98852_0
- libwebp=0.5.2=7
- libxcb=1.13=h7f98852_1003
- libxml2=2.9.9=h13577e0_2
- llvm-openmp=8.0.1=hc9558a2_0
- lz4-c=1.9.3=h9c3ff4c_1
- mesalib=18.3.1=h590aaf7_0
- metis=5.1.0=h58526e2_1006
- mpfr=4.1.0=h9202a9a_1
- mysql=5.7.20=hf484d3e_1001
- mysql-connector-c=6.1.11=hab6429c_1002
- nanoflann=1.3.2=ha770c72_0
- ncurses=6.2=he6710b0_1
- networkx=2.5=py_0
- nlohmann_json=3.10.2=h9c3ff4c_0
- nn=1.86.0=h14c3975_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.5=hf817b99_0
- openjpeg=2.1.0=6
- openmp=8.0.1=0
- openssl=1.0.2u=h516909a_0
- parallel=20210822=ha770c72_0
- pbzip2=1.1.13=0
- pcl=1.9.1=h482114b_1002
- pcre=8.45=h9c3ff4c_0
- perl=5.32.1=0_h7f98852_perl5
- pip=21.0.1=py36h06a4308_0
- pixman=0.34.0=h14c3975_1003
- postgresql=10.6=h66cca7a_1000
- proj4=5.2.0=he1b5a44_1006
- protobuf=3.9.2=py36he1b5a44_1
- pthread-stubs=0.4=h36c2ea0_1001
- pvl=1.2.1=pyhd8ed1ab_0
- python=3.6.7=hd21baee_1002
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.6=2_cp36m
- pytz=2021.1=pyhd8ed1ab_0
- pyyaml=5.4.1=py36h8f6f2f9_1
- qhull=7.2.0=0
- qt=5.9.6=7
- qwt=6.1.3=0
- readline=7.0=hf8c457e_1001
- rhash=1.4.1=h7f98852_0
- s2p-subset=isis5.0.1
- scipy=1.2.1=py36h09a28d5_1
- setuptools=52.0.0=py36h06a4308_0
- six=1.16.0=pyh6c4a22f_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h8b20d00_0
- suitesparse=5.10.1=hd8046ac_0
- superlu=5.2.2=hfe2efc7_0
- sysroot_linux-64=2.17=h4a8ded7_12
- tbb=2020.2=h4bd325d_4
- theia=isis5.0.1
- tk=8.6.10=hbc83047_0
- tnt=126=0
- tzcode=2021a=h7f98852_2
- usgscsm=1.5.2
- wheel=0.37.0=pyhd3eb1b0_0
- x264=20131218=0
- xerces-c=3.1.4=0
- xorg-fixesproto=5.0=h7f98852_1002
- xorg-inputproto=2.3.2=h7f98852_1002
- xorg-kbproto=1.0.7=h7f98852_1002
- xorg-libice=1.0.10=h7f98852_0
- xorg-libsm=1.2.3=hd9c2040_1000
- xorg-libx11=1.7.2=h7f98852_0
- xorg-libxau=1.0.9=h7f98852_0
- xorg-libxdmcp=1.1.3=h7f98852_0
- xorg-libxext=1.3.4=h7f98852_1
- xorg-libxfixes=5.0.3=h7f98852_1004
- xorg-libxi=1.7.10=h7f98852_0
- xorg-libxrender=0.9.10=h7f98852_1003
- xorg-renderproto=0.11.1=h7f98852_1002
- xorg-xextproto=7.3.0=h7f98852_1002
- xorg-xproto=7.0.31=h7f98852_1007
- xz=5.2.5=h7b6447c_0
- yaml=0.2.5=h516909a_0
- zlib=1.2.11=h7b6447c_3
- zstd=1.4.9=ha95c52a_0
================================================
FILE: conda/asp_deps_3.0.0_osx_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- ale=0.8.5=py36hc61eee1_3
- armadillo=9.200.4=hcb88e47_1205
- arpack=3.6.3=h06d5271_1005
- blas=1.1=openblas
- boost=1.68.0=py36h9888f84_1001
- boost-cpp=1.68.0=h6f8c590_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h0d85af4_4
- ca-certificates=2021.5.30=h033912b_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=hb9d6bad_10
- certifi=2021.5.30=py36h79c6626_0
- cmake=3.3.1=0
- csm=3.0.3.3=0
- cspice=66=h0d85af4_1014
- curl=7.64.0=heae2a1f_0
- decorator=5.0.9=pyhd8ed1ab_0
- eigen=3.3.7=h04f5b5a_0
- embree=2.16.0=h6834224_0
- expat=2.4.1=he49afe7_0
- ffmpeg=3.4.1=0
- fftw=3.3.8=nompi_h5c49c53_1109
- fgr=isis5.0.1
- flann=1.9.1=0
- fontconfig=2.12.1=4
- freetype=2.7=1
- geoid=1.0=1
- geos=3.7.3=h4a8c4bd_0
- geotiff=1.4.3=h113155d_1001
- gettext=0.19.8.1=h46ab8bc_1002
- gflags=2.2.2=hb1e8313_1004
- giflib=5.2.1=hbcb3906_2
- git=2.11.1=0
- glib=2.51.4=0
- glog=0.4.0=hb7f4fc5_3
- gmm=5.0=h6aef312_0
- gmp=6.2.1=h2e338ed_0
- gsl=2.7=h93259b0_0
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=h0a44026_1000
- ilmbase=2.5.5=hfab91a5_0
- imagemagick=isis5.0.1
- inja=3.2.0=h1c7c35f_0
- isis=5.0.1=0
- jama=125=0
- jasper=1.900.1=h636a363_1006
- jpeg=9d=hbcb3906_0
- kakadu=1=0
- krb5=1.16.3=h24a3359_1000
- laszip=2.1.0=h0a44026_1
- libblas=3.8.0=17_openblas
- libcblas=3.8.0=17_openblas
- libcurl=7.64.0=h76de61e_0
- libcxx=12.0.1=habf9029_0
- libedit=3.1.20191231=h0678c8f_2
- libelas=isis5.0.1
- libffi=3.2.1=hb1e8313_1007
- libgcc=4.8.5=hdbeacc1_10
- libgdal=2.4.1_isis5.0.1
- libgfortran=3.0.1=0
- libiconv=1.15=h0b31af3_1006
- liblapack=3.8.0=17_openblas
- liblas=isis5.0.1
- libnabo=isis5.0.1
- libopenblas=0.3.10=h0794777_0
- libpng=1.6.37=h7cec526_2
- libpointmatcher=isis5.0.1
- libpq=10.6=hbe1e24e_1000
- libprotobuf=3.9.2=hfbae3c0_0
- libssh2=1.8.0=hf30b1f0_1003
- libtiff=4.0.10=ha78913b_1005
- libwebp=0.5.2=7
- libxcb=1.13=h35c211d_1003
- libxml2=2.9.9=hd80cff7_2
- llvm-openmp=12.0.1=hda6cdc1_0
- lz4-c=1.9.3=h046ec9c_0
- mesalib=18.0.0=hb6cfc13_1
- metis=5.1.0=h2e338ed_1006
- mpfr=4.0.2=h72d8aaf_1
- mysql=5.7.20=h0a44026_1001
- mysql-connector-c=6.1.11=had4e77e_1002
- nanoflann=1.3.2=h694c41f_0
- ncurses=6.2=h2e338ed_4
- networkx=2.5=py_0
- nlohmann_json=3.9.1=he49afe7_1
- nn=1.86.0=h1de35cc_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.5=h7fa7ffa_0
- openjpeg=2.1.0=6
- openssl=1.0.2u=h0b31af3_0
- parallel=20210622=h694c41f_0
- pbzip2=1.1.13=h9d27c22_1
- pcl=1.9.1=hdd77166_1002
- pcre=8.45=he49afe7_0
- perl=5.32.1=0_h0d85af4_perl5
- pip=21.1.3=pyhd8ed1ab_0
- pixman=0.34.0=h1de35cc_1003
- postgresql=10.6=ha1bbaa7_1000
- proj4=5.2.0=h6de7cb9_1006
- protobuf=3.9.2=py36h6de7cb9_1
- pthread-stubs=0.4=hc929b4f_1001
- pvl=1.2.1=pyhd8ed1ab_0
- python=3.6.7=h4a56312_1002
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.6=2_cp36m
- pytz=2021.1=pyhd8ed1ab_0
- pyyaml=5.4.1=py36h20b66c6_0
- qhull=7.2.0=0
- qt=5.9.6=7
- qwt=6.1.3=0
- readline=7.0=hcfe32e1_1001
- s2p-subset=isis5.0.1
- scipy=1.2.1=py36hbd7caa9_1
- setuptools=49.6.0=py36h79c6626_3
- six=1.16.0=pyh6c4a22f_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h9721f7c_0
- suitesparse=5.10.1=h68a9093_0
- superlu=5.2.1=hbced767_1205
- tbb=2020.2=h940c156_4
- theia=isis5.0.1
- tk=8.6.10=h0419947_1
- tnt=126=0
- tzcode=2021a=h0d85af4_2
- usgscsm=1.5.2
- wheel=0.36.2=pyhd3deb0d_0
- x264=20131218=0
- xerces-c=3.1.4=h10f7eb2_0
- xorg-fixesproto=5.0=h0d85af4_1002
- xorg-inputproto=2.3.2=h35c211d_1002
- xorg-kbproto=1.0.7=h35c211d_1002
- xorg-libice=1.0.10=h0d85af4_0
- xorg-libsm=1.2.3=h0d85af4_1000
- xorg-libx11=1.7.2=h0d85af4_0
- xorg-libxau=1.0.9=h35c211d_0
- xorg-libxdmcp=1.1.3=h35c211d_0
- xorg-libxext=1.3.4=h0d85af4_1
- xorg-libxfixes=5.0.3=h0d85af4_1004
- xorg-libxi=1.7.10=h0d85af4_0
- xorg-xextproto=7.3.0=h35c211d_1002
- xorg-xproto=7.0.31=h35c211d_1007
- xz=5.2.5=haf1e3a3_1
- yaml=0.2.5=haf1e3a3_0
- zlib=1.2.11=h7795811_1010
- zstd=1.4.9=h582d3a0_0
================================================
FILE: conda/asp_deps_3.1.0_linux_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=1_gnu
- _sysroot_linux-64_curr_repodata_hack=3=h5bd9786_13
- ale=0.8.5=py36h605e78d_3
- armadillo=10.8.2=h7c03176_0
- arpack=3.7.0=hc6cf775_2
- binutils_impl_linux-64=2.36.1=h193b22a_2
- binutils_linux-64=2.36=hf3e587d_9
- blas=1.1=openblas
- boost=1.68.0=py36h8619c78_1001
- boost-cpp=1.68.0=h11c811c_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h7f98852_4
- ca-certificates=2021.10.8=ha878542_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=hf302a74_15
- chrpath=0.16=h7f98852_1002
- cmake=3.15.5=hf94ab9c_0
- csm=3.0.3.3=hc9558a2_0
- cspice=66=h7f98852_1015
- curl=7.64.0=h646f8bb_0
- dbus=1.13.0=h4e0c4b3_1000
- decorator=5.1.1=pyhd8ed1ab_0
- eigen=3.4.0=h4bd325d_0
- embree=2.16.0=0
- expat=2.4.8=h27087fc_0
- ffmpeg=3.4.1=0
- fftw=3.3.8=nompi_hfc0cae8_1114
- fgr=isis6=h3fd9d12_0
- flann=1.9.1=0
- fontconfig=2.12.1=4
- freetype=2.7=1
- gcc_impl_linux-64=11.2.0=h82a94d6_16
- gcc_linux-64=11.2.0=h39a9532_9
- geoid=1.0=1
- geos=3.7.3=he1b5a44_0
- geotiff=1.4.3=hb6868eb_1001
- gettext=0.19.8.1=hf34092f_1004
- gflags=2.2.2=he1b5a44_1004
- giflib=5.2.1=h36c2ea0_2
- glib=2.51.4=0
- glog=0.6.0=h6f12383_0
- gmm=5.0=0
- gmp=6.2.1=h58526e2_0
- gsl=2.7=he838d99_0
- gst-plugins-base=1.8.0=0
- gstreamer=1.8.0=2
- gxx_impl_linux-64=11.2.0=h82a94d6_16
- gxx_linux-64=11.2.0=hacbe6df_9
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=hf484d3e_1000
- ilmbase=2.5.5=h780b84a_0
- imagemagick=isis6=h3fd9d12_0
- imath=3.1.5=h6239696_0
- inja=3.3.0=h9c3ff4c_0
- isis=6.0.0=0
- jama=125=0
- jasper=1.900.1=h07fcdf6_1006
- jpeg=9e=h166bdaf_1
- kakadu=1=0
- kernel-headers_linux-64=3.10.0=h4a8ded7_13
- krb5=1.16.3=hc83ff2d_1000
- laszip=2.1.0=hf484d3e_1
- ld_impl_linux-64=2.36.1=hea4e1c9_2
- libblas=3.9.0=13_linux64_openblas
- libcblas=3.9.0=13_linux64_openblas
- libcurl=7.64.0=h01ee5af_0
- libedit=3.1.20191231=he28a2e2_2
- libelas=isis6=h3fd9d12_0
- libffi=3.2.1=he1b5a44_1007
- libgcc=7.2.0=h69d50b8_2
- libgcc-devel_linux-64=11.2.0=h0952999_16
- libgcc-ng=11.2.0=h1d223b6_15
- libgdal=2.4.1_isis6=h3fd9d12_0
- libgfortran=3.0.0=1
- libgfortran-ng=7.5.0=h14aa051_20
- libgfortran4=7.5.0=h14aa051_20
- libglvnd-cos7-x86_64=1.0.1=h9b0a68f_1105
- libgomp=11.2.0=h1d223b6_15
- libiconv=1.15=h516909a_1006
- liblapack=3.9.0=13_linux64_openblas
- liblas=isis6=h3fd9d12_1000
- libnabo=isis6=h3fd9d12_0
- libnsl=2.0.0=h7f98852_0
- libopenblas=0.3.18=hf726d26_0
- libpng=1.6.37=h21135ba_2
- libpointmatcher=isis6=h2bc3f7f_0
- libpq=10.6=h13b8bad_1000
- libprotobuf=3.9.2=h8b12597_0
- libsanitizer=11.2.0=he4da1e4_16
- libssh2=1.8.0=h1ad7b7a_1003
- libstdcxx-devel_linux-64=11.2.0=h0952999_16
- libstdcxx-ng=11.2.0=he4da1e4_15
- libtiff=4.0.10=hc3755c2_1005
- libuuid=2.32.1=h7f98852_1000
- libuv=1.43.0=h7f98852_0
- libwebp=0.5.2=7
- libxcb=1.13=h7f98852_1004
- libxml2=2.9.9=h13577e0_2
- libzlib=1.2.11=h166bdaf_1014
- llvm-openmp=8.0.1=hc9558a2_0
- lz4-c=1.9.3=h9c3ff4c_1
- mesalib=18.3.1=h590aaf7_0
- metis=5.1.0=h58526e2_1006
- mpfr=4.1.0=h9202a9a_1
- mysql=5.7.20=hf484d3e_1001
- mysql-connector-c=6.1.11=hab6429c_1002
- nanoflann=1.4.2=ha770c72_0
- ncurses=6.3=h27087fc_1
- networkx=2.5=py_0
- nlohmann_json=3.10.5=h9c3ff4c_0
- nn=1.86.0=h14c3975_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.5=hf817b99_0
- openjpeg=2.3.0=hf38bd82_1003
- openmp=8.0.1=0
- openssl=1.0.2u=h516909a_0
- parallel=20220222=ha770c72_0
- pbzip2=1.1.13=0
- pcl=1.9.1=h482114b_1002
- pcre=8.45=h9c3ff4c_0
- perl=5.32.1=2_h7f98852_perl5
- pip=21.3.1=pyhd8ed1ab_0
- pixman=0.34.0=h14c3975_1003
- postgresql=10.6=h66cca7a_1000
- proj4=5.2.0=he1b5a44_1006
- protobuf=3.9.2=py36he1b5a44_1
- pthread-stubs=0.4=h36c2ea0_1001
- pvl=1.3.1=pyhd8ed1ab_0
- python=3.6.7=hd21baee_1002
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.6=2_cp36m
- pytz=2022.1=pyhd8ed1ab_0
- pyyaml=5.4.1=py36h8f6f2f9_1
- qhull=7.2.0=0
- qt=5.9.6=7
- qwt=6.1.3=0
- readline=7.0=hf8c457e_1001
- rhash=1.4.1=h7f98852_0
- s2p-subset=isis6=h3fd9d12_0
- scipy=1.2.1=py36h09a28d5_1
- setuptools=58.0.4=py36h5fab9bb_2
- six=1.16.0=pyh6c4a22f_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h8b20d00_0
- suitesparse=5.10.1=h9e50725_1
- superlu=5.2.2=hfe2efc7_0
- sysroot_linux-64=2.17=h4a8ded7_13
- tbb=2021.5.0=h924138e_1
- theia=isis6=h3fd9d12_1001
- tk=8.6.12=h27826a3_0
- tnt=126=0
- tzcode=2022a=h166bdaf_0
- usgscsm=1.6.0_asp3.1.0=h2bc3f7f_0
- wheel=0.37.1=pyhd8ed1ab_0
- x264=20131218=0
- xerces-c=3.1.4=0
- xorg-fixesproto=5.0=h7f98852_1002
- xorg-inputproto=2.3.2=h7f98852_1002
- xorg-kbproto=1.0.7=h7f98852_1002
- xorg-libice=1.0.10=h7f98852_0
- xorg-libsm=1.2.3=hd9c2040_1000
- xorg-libx11=1.7.2=h7f98852_0
- xorg-libxau=1.0.9=h7f98852_0
- xorg-libxdmcp=1.1.3=h7f98852_0
- xorg-libxext=1.3.4=h7f98852_1
- xorg-libxfixes=5.0.3=h7f98852_1004
- xorg-libxi=1.7.10=h7f98852_0
- xorg-libxrender=0.9.10=h7f98852_1003
- xorg-renderproto=0.11.1=h7f98852_1002
- xorg-xextproto=7.3.0=h7f98852_1002
- xorg-xproto=7.0.31=h7f98852_1007
- xz=5.2.5=h516909a_1
- yaml=0.2.5=h7f98852_2
- zlib=1.2.11=h166bdaf_1014
- zstd=1.4.9=ha95c52a_0
================================================
FILE: conda/asp_deps_3.1.0_osx_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- ale=0.8.5=py36hc61eee1_3
- armadillo=9.200.4=hcb88e47_1205
- arpack=3.6.3=h06d5271_1005
- blas=1.1=openblas
- boost=1.68.0=py36h9888f84_1001
- boost-cpp=1.68.0=h6f8c590_1000
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h0d85af4_4
- ca-certificates=2021.10.8=h033912b_0
- cairo=1.14.6=4
- ceres-solver=1.14.0=h636452b_15
- certifi=2021.5.30=py36h79c6626_0
- cmake=3.15.5=h6c18c4b_0
- csm=3.0.3.3=0
- cspice=66=h0d85af4_1015
- curl=7.64.0=heae2a1f_0
- decorator=5.1.1=pyhd8ed1ab_0
- eigen=3.4.0=h940c156_0
- embree=2.16.0=h6834224_0
- expat=2.4.8=h96cf925_0
- ffmpeg=3.4.1=0
- fftw=3.3.8=nompi_h5c49c53_1109
- fgr=isis6=h01edc0c_0
- flann=1.9.1=0
- fontconfig=2.12.1=4
- freetype=2.7=1
- geoid=1.0=1
- geos=3.7.3=h4a8c4bd_0
- geotiff=1.4.3=h113155d_1001
- gettext=0.19.8.1=h46ab8bc_1002
- gflags=2.2.2=hb1e8313_1004
- giflib=5.2.1=hbcb3906_2
- glib=2.51.4=0
- glog=0.6.0=h8ac2a54_0
- gmm=5.0=h6aef312_0
- gmp=6.2.1=h2e338ed_0
- gsl=2.7=h93259b0_0
- harfbuzz=1.3.4=2
- hdf5=1.8.18=3
- htdp=1.0=1
- icu=58.2=h0a44026_1000
- ilmbase=2.5.5=hfab91a5_0
- imagemagick=isis6=h01edc0c_0
- inja=3.3.0=he49afe7_0
- isis=6.0.0=0
- jama=125=0
- jasper=1.900.1=h636a363_1006
- jpeg=9e=h5eb16cf_1
- kakadu=1=0
- krb5=1.16.3=h24a3359_1000
- laszip=2.1.0=h0a44026_1
- libblas=3.9.0=13_osx64_openblas
- libcblas=3.9.0=13_osx64_openblas
- libcurl=7.64.0=h76de61e_0
- libcxx=14.0.3=hc203e6f_0
- libedit=3.1.20191231=h0678c8f_2
- libelas=isis6=0
- libffi=3.2.1=hb1e8313_1007
- libgcc=4.8.5=hdbeacc1_10
- libgdal=2.4.1_isis6=h01edc0c_0
- libgfortran=3.0.1=0
- libiconv=1.15=h0b31af3_1006
- liblapack=3.9.0=13_osx64_openblas
- liblas=isis6=h01edc0c_1000
- libnabo=isis6=h01edc0c_0
- libopenblas=0.3.18=h9a5756b_0
- libpng=1.6.37=h7cec526_2
- libpointmatcher=isis6=ha5a8b8e_0
- libpq=10.6=hbe1e24e_1000
- libprotobuf=3.9.2=hfbae3c0_0
- libssh2=1.8.0=hf30b1f0_1003
- libtiff=4.0.10=ha78913b_1005
- libuv=1.43.0=h0d85af4_0
- libwebp=0.5.2=7
- libxcb=1.13=h0d85af4_1004
- libxml2=2.9.9=hd80cff7_2
- libzlib=1.2.11=h6c3fc93_1014
- llvm-openmp=14.0.3=ha654fa7_0
- lz4-c=1.9.3=he49afe7_1
- mesalib=21.2.5=h2df1e00_3
- metis=5.1.0=h2e338ed_1006
- mpfr=4.1.0=h0f52abe_1
- mysql=5.7.20=h0a44026_1001
- mysql-connector-c=6.1.11=had4e77e_1002
- nanoflann=1.4.2=h694c41f_0
- ncurses=6.3=h96cf925_1
- networkx=2.5=py_0
- nlohmann_json=3.10.5=he49afe7_0
- nn=1.86.0=h1de35cc_2003
- numpy=1.13.3=py36_blas_openblas_200
- openblas=0.2.19=2
- opencv=3.2.0=np113py36_blas_openblas_203
- openexr=2.5.5=h7fa7ffa_0
- openjpeg=2.3.0=h3bf0609_1003
- openssl=1.0.2u=h0b31af3_0
- parallel=20220222=h694c41f_0
- pbzip2=1.1.13=h9d27c22_1
- pcl=1.9.1=hdd77166_1002
- pcre=8.45=he49afe7_0
- perl=5.32.1=2_h0d85af4_perl5
- pip=21.3.1=pyhd8ed1ab_0
- pixman=0.34.0=h1de35cc_1003
- postgresql=10.6=ha1bbaa7_1000
- proj4=5.2.0=h6de7cb9_1006
- protobuf=3.9.2=py36h6de7cb9_1
- pthread-stubs=0.4=hc929b4f_1001
- pvl=1.3.1=pyhd8ed1ab_0
- python=3.6.7=h4a56312_1002
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.6=2_cp36m
- pytz=2022.1=pyhd8ed1ab_0
- pyyaml=5.4.1=py36hfa26744_1
- qhull=7.2.0=0
- qt=5.9.6=7
- qwt=6.1.3=0
- readline=7.0=hcfe32e1_1001
- rhash=1.4.1=h0d85af4_0
- s2p-subset=isis6=h01edc0c_0
- scipy=1.2.1=py36hbd7caa9_1
- setuptools=58.0.4=py36h79c6626_2
- six=1.16.0=pyh6c4a22f_0
- spiceypy=2.3.2=py_0
- sqlite=3.28.0=h9721f7c_0
- suitesparse=5.10.1=h7aff33d_1
- superlu=5.2.1=hbced767_1205
- tbb=2021.5.0=hbb4e6a2_1
- theia=isis6=h01edc0c_1001
- tk=8.6.12=h5dbffcc_0
- tnt=126=0
- tzcode=2022a=h5eb16cf_0
- usgscsm=1.6.0_asp3.1.0=ha5a8b8e_0
- wheel=0.37.1=pyhd8ed1ab_0
- x264=20131218=0
- xerces-c=3.1.4=h10f7eb2_0
- xorg-damageproto=1.2.1=h0d85af4_1002
- xorg-fixesproto=5.0=h0d85af4_1002
- xorg-glproto=1.4.17=h0d85af4_1002
- xorg-inputproto=2.3.2=h35c211d_1002
- xorg-kbproto=1.0.7=h35c211d_1002
- xorg-libice=1.0.10=h0d85af4_0
- xorg-libsm=1.2.3=h0d85af4_1000
- xorg-libx11=1.7.2=h0d85af4_0
- xorg-libxau=1.0.9=h35c211d_0
- xorg-libxdamage=1.1.5=h0d85af4_1
- xorg-libxdmcp=1.1.3=h35c211d_0
- xorg-libxext=1.3.4=h0d85af4_1
- xorg-libxfixes=5.0.3=h0d85af4_1004
- xorg-libxi=1.7.10=h0d85af4_0
- xorg-libxrandr=1.5.2=h0d85af4_1
- xorg-libxrender=0.9.10=h0d85af4_1003
- xorg-randrproto=1.5.0=h0d85af4_1001
- xorg-renderproto=0.11.1=h0d85af4_1002
- xorg-util-macros=1.19.3=h35c211d_0
- xorg-xextproto=7.3.0=h35c211d_1002
- xorg-xf86vidmodeproto=2.3.1=h0d85af4_1002
- xorg-xproto=7.0.31=h35c211d_1007
- xz=5.2.5=haf1e3a3_1
- yaml=0.2.5=h0d85af4_2
- zlib=1.2.11=h6c3fc93_1014
- zstd=1.4.9=h582d3a0_0
================================================
FILE: conda/asp_deps_3.2.0_linux_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_kmp_llvm
- _sysroot_linux-64_curr_repodata_hack=3=h5bd9786_13
- ale=0.8.8=py39hf939315_1
- alsa-lib=1.2.6.1=h7f98852_0
- aom=3.4.0=h27087fc_1
- armadillo=11.4.2=h7209761_0
- arpack=3.7.0=hdefa2d7_2
- attr=2.5.1=h166bdaf_1
- binutils_impl_linux-64=2.39=he00db2b_1
- binutils_linux-64=2.39=h5fc0e48_11
- blas=2.116=openblas
- blas-devel=3.9.0=16_linux64_openblas
- boost=1.72.0=py39ha90915f_1
- boost-cpp=1.72.0=he72f1d9_7
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h7f98852_4
- c-ares=1.18.1=h7f98852_0
- ca-certificates=2022.12.7=ha878542_0
- cairo=1.16.0=ha61ee94_1014
- ceres-solver=1.14.0=hf302a74_15
- chrpath=0.16=h7f98852_1002
- cmake=3.15.5=hf94ab9c_0
- csm=3.0.3.3=hc9558a2_0
- cspice=67=h166bdaf_4
- curl=7.86.0=h7bff187_1
- cyrus-sasl=2.1.27=h230043b_5
- dbus=1.13.6=h5008d03_3
- eigen=3.4.0=h4bd325d_0
- elfutils=0.186=he364ef2_0
- embree=2.16.0=0
- expat=2.5.0=h27087fc_0
- ffmpeg=4.4.2=gpl_hfe78399_107
- fftw=3.3.10=nompi_hf0379b8_106
- fgr=isis7=h3fd9d12_0
- flann=1.9.1=he05ef13_1011
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.14.1=hc2a2eb6_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freeglut=3.2.2=h9c3ff4c_1
- freetype=2.12.1=hca18f0e_1
- gcc_impl_linux-64=12.2.0=hcc96c02_19
- gcc_linux-64=12.2.0=h4798a0e_11
- geoid=1.0_isis7=1
- geos=3.7.3=he1b5a44_0
- geotiff=1.7.1=ha76d385_4
- gettext=0.21.1=h27087fc_0
- gflags=2.2.2=he1b5a44_1004
- gfortran_impl_linux-64=12.2.0=h55be85b_19
- gfortran_linux-64=12.2.0=h307d370_11
- giflib=5.2.1=h36c2ea0_2
- glib=2.74.1=h6239696_1
- glib-tools=2.74.1=h6239696_1
- glog=0.6.0=h6f12383_0
- gmp=6.2.1=h58526e2_0
- gnutls=3.7.8=hf3e180e_0
- graphite2=1.3.13=h58526e2_1001
- gsl=2.7=he838d99_0
- gst-plugins-base=1.20.3=hf6a322e_0
- gstreamer=1.20.3=hd4edc92_2
- gxx_impl_linux-64=12.2.0=hcc96c02_19
- gxx_linux-64=12.2.0=hb41e900_11
- harfbuzz=5.3.0=h418a68e_0
- hdf5=1.12.2=nompi_h2386368_100
- htdp=1.0_isis7=1
- icu=70.1=h27087fc_0
- ilmbase=2.5.5=h780b84a_0
- inja=3.3.0=h9c3ff4c_0
- isis=7.1.0=0
- jack=1.9.18=h8c3723f_1002
- jama=125=0
- jasper=2.0.33=ha77e612_0
- jemalloc=5.2.1=h9c3ff4c_6
- jpeg=9e=h166bdaf_2
- kakadu=1=0
- kernel-headers_linux-64=3.10.0=h4a8ded7_13
- keyutils=1.6.1=h166bdaf_0
- krb5=1.19.3=h3790be6_0
- lame=3.100=h166bdaf_1003
- laszip=2.1.0_isis7=h3fd9d12_1
- ld_impl_linux-64=2.39=hcc3a1bd_1
- lerc=4.0.0=h27087fc_0
- libarchive=3.5.2=hb890918_3
- libblas=3.9.0=16_linux64_openblas
- libcap=2.64=ha37c62d_0
- libcblas=3.9.0=16_linux64_openblas
- libclang=14.0.6=default_h2e3cab8_0
- libclang13=14.0.6=default_h3a83d3e_0
- libcups=2.3.3=h3e49a29_2
- libcurl=7.86.0=h7bff187_1
- libcxx=14.0.6=hf52228f_0
- libcxxabi=14.0.6=ha770c72_0
- libdb=6.2.32=h9c3ff4c_0
- libdeflate=1.14=h166bdaf_0
- libdrm=2.4.114=h166bdaf_0
- libedit=3.1.20191231=he28a2e2_2
- libelas=isis7=h3fd9d12_0
- libev=4.33=h516909a_1
- libevent=2.1.10=h9b69904_4
- libffi=3.4.2=h7f98852_5
- libflac=1.3.4=h27087fc_0
- libgcc-devel_linux-64=12.2.0=h3b97bd3_19
- libgcc-ng=12.2.0=h65d4601_19
- libgdal=3.5_isis7=h3fd9d12_0
- libgfortran-ng=12.2.0=h69a702a_19
- libgfortran5=12.2.0=h337968e_19
- libglib=2.74.1=h606061b_1
- libglu=9.0.0=he1b5a44_1001
- libglvnd-cos7-x86_64=1.0.1=h9b0a68f_1105
- libgomp=12.2.0=h65d4601_19
- libhwloc=2.8.0=h32351e8_1
- libiconv=1.17=h166bdaf_0
- libidn2=2.3.4=h166bdaf_0
- libjemalloc=5.2.1=h9c3ff4c_6
- liblapack=3.9.0=16_linux64_openblas
- liblapacke=3.9.0=16_linux64_openblas
- liblas=1.8.2_isis7=h3fd9d12_0
- libllvm14=14.0.6=he0ac6c6_1
- libmicrohttpd=0.9.75=h2603550_1
- libnabo=isis7=h3fd9d12_0
- libnghttp2=1.47.0=hdcd2b5c_1
- libnsl=2.0.0=h7f98852_0
- libntlm=1.4=h7f98852_1002
- libogg=1.3.4=h7f98852_1
- libopenblas=0.3.21=pthreads_h78a6416_3
- libopencv=4.6.0=py39h04bf7ee_4
- libopus=1.3.1=h7f98852_1
- libpciaccess=0.17=h166bdaf_0
- libpng=1.6.39=h753d276_0
- libpointmatcher=isis7=h2bc3f7f_0
- libpq=14.5=h72a31a5_3
- libprotobuf=3.21.11=h3eb15da_0
- libsanitizer=12.2.0=h46fd767_19
- libsndfile=1.0.31=h9c3ff4c_1
- libsqlite=3.40.0=h753d276_0
- libssh2=1.10.0=haa6b8db_3
- libstdcxx-devel_linux-64=12.2.0=h3b97bd3_19
- libstdcxx-ng=12.2.0=h46fd767_19
- libtasn1=4.19.0=h166bdaf_0
- libtiff=4.4.0=h55922b4_4
- libtool=2.4.6=h9c3ff4c_1008
- libudev1=252=h166bdaf_0
- libunistring=0.9.10=h7f98852_0
- libuuid=2.32.1=h7f98852_1000
- libuv=1.44.2=h166bdaf_0
- libva=2.16.0=h166bdaf_0
- libvorbis=1.3.7=h9c3ff4c_0
- libvpx=1.11.0=h9c3ff4c_3
- libwebp=1.2.4=h522a892_0
- libwebp-base=1.2.4=h166bdaf_0
- libxcb=1.13=h7f98852_1004
- libxkbcommon=1.0.3=he3ba5ed_0
- libxml2=2.9.14=h22db469_4
- libzlib=1.2.13=h166bdaf_4
- llvm-openmp=15.0.6=he0ac6c6_0
- lz4-c=1.9.3=h9c3ff4c_1
- lzo=2.10=h516909a_1000
- mesalib=21.2.5=h0e4506f_3
- metis=5.1.0=h58526e2_1006
- mpfr=4.1.0=h9202a9a_1
- multiview=asp3.2.0=py39h3fd9d12_0
- mysql=8.0.31=h3e2b116_0
- mysql-client=8.0.31=hf89ab62_0
- mysql-common=8.0.31=haf5c9bc_0
- mysql-connector-c=6.1.11=h6eb9d5d_1007
- mysql-devel=8.0.31=haf5c9bc_0
- mysql-libs=8.0.31=h28c427c_0
- mysql-server=8.0.31=hb01f15f_0
- nanoflann=1.4.2=ha770c72_0
- ncurses=6.3=h27087fc_1
- nettle=3.8.1=hc379101_1
- networkx=2.8.8=pyhd8ed1ab_0
- nlohmann_json=3.11.2=h27087fc_0
- nn=1.86.0=h14c3975_2003
- nspr=4.35=h27087fc_0
- nss=3.82=he02c5a1_0
- numpy=1.23.5=py39h3d75532_0
- openblas=0.3.21=pthreads_h320a7e8_3
- opencv=4.6.0=py39hf3d152e_4
- openexr=2.5.5=hf817b99_0
- openh264=2.3.0=h27087fc_0
- openjpeg=2.3.0=hf38bd82_1003
- openssl=1.1.1s=h0b41bf4_1
- p11-kit=0.24.1=hc5aa10d_0
- parallel=20221122=ha770c72_0
- pbzip2=1.1.13=0
- pcl=1.11.1=h05311af_1
- pcre2=10.40=hc3806b6_0
- perl=5.32.1=2_h7f98852_perl5
- pip=22.3.1=pyhd8ed1ab_0
- pixman=0.40.0=h36c2ea0_0
- portaudio=19.6.0=h57a0ea0_5
- proj=9.1.0=h93bde94_0
- protobuf=4.21.11=py39h227be39_0
- pthread-stubs=0.4=h36c2ea0_1001
- pulseaudio=14.0=h7f54b18_8
- pvl=1.3.2=pyhd8ed1ab_0
- py-opencv=4.6.0=py39hef51801_4
- python=3.9.15=h47a2c10_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.9=3_cp39
- pytz=2022.6=pyhd8ed1ab_0
- pyyaml=6.0=py39hb9d737c_5
- qhull=2020.2=h4bd325d_2
- qt=5.15.4=hf11cfaa_0
- qt-main=5.15.4=ha5833f6_2
- qt-webengine=5.15.4=hcbadb6c_3
- qwt=6.2.0=h1d9fb53_4
- rapidjson=1.1.0=he1b5a44_1002
- rclone=1.61.1=h519d9b9_0
- readline=8.1.2=h0f457ee_0
- rhash=1.4.3=h166bdaf_0
- rocksdb=6.13.3=hda8cf21_2
- s2p-subset=isis7=h3fd9d12_0
- scipy=1.9.3=py39hddc5342_2
- setuptools=65.5.1=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- snappy=1.1.9=hbd366e4_2
- spiceypy=5.1.2=pyhd8ed1ab_0
- sqlite=3.40.0=h4ff8645_0
- suitesparse=5.10.1=h9e50725_1
- superlu=5.2.2=h00795ac_0
- svt-av1=1.2.1=h27087fc_0
- sysroot_linux-64=2.17=h4a8ded7_13
- tbb=2021.7.0=h924138e_1
- tbb-devel=2021.7.0=h924138e_1
- tk=8.6.12=h27826a3_0
- tnt=126=0
- tzdata=2022g=h191b570_0
- usgscsm=1.6.0=h924138e_1
- wheel=0.38.4=pyhd8ed1ab_0
- x264=1!164.3095=h166bdaf_2
- x265=3.5=h924138e_3
- xcb-util=0.4.0=h166bdaf_0
- xcb-util-image=0.4.0=h166bdaf_0
- xcb-util-keysyms=0.4.0=h166bdaf_0
- xcb-util-renderutil=0.3.9=h166bdaf_0
- xcb-util-wm=0.4.1=h166bdaf_0
- xerces-c=3.2.3=h55805fa_5
- xorg-damageproto=1.2.1=h7f98852_1002
- xorg-fixesproto=5.0=h7f98852_1002
- xorg-glproto=1.4.17=h7f98852_1002
- xorg-inputproto=2.3.2=h7f98852_1002
- xorg-kbproto=1.0.7=h7f98852_1002
- xorg-libice=1.0.10=h7f98852_0
- xorg-libsm=1.2.3=hd9c2040_1000
- xorg-libx11=1.7.2=h7f98852_0
- xorg-libxau=1.0.9=h7f98852_0
- xorg-libxdamage=1.1.5=h7f98852_1
- xorg-libxdmcp=1.1.3=h7f98852_0
- xorg-libxext=1.3.4=h7f98852_1
- xorg-libxfixes=5.0.3=h7f98852_1004
- xorg-libxi=1.7.10=h7f98852_0
- xorg-libxrandr=1.5.2=h7f98852_1
- xorg-libxrender=0.9.10=h7f98852_1003
- xorg-randrproto=1.5.0=h7f98852_1001
- xorg-renderproto=0.11.1=h7f98852_1002
- xorg-util-macros=1.19.3=h7f98852_0
- xorg-xextproto=7.3.0=h7f98852_1002
- xorg-xf86vidmodeproto=2.3.1=h7f98852_1002
- xorg-xproto=7.0.31=h7f98852_1007
- xz=5.2.6=h166bdaf_0
- yaml=0.2.5=h7f98852_2
- zlib=1.2.13=h166bdaf_4
- zstd=1.5.2=h6239696_4
- mesa-libgl-cos6-x86_64
- xorg-libxmu
- mesalib
================================================
FILE: conda/asp_deps_3.2.0_osx_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- ale=0.8.8=py39h92daf61_1
- aom=3.4.0=hb486fe8_1
- armadillo=11.4.2=hffeb596_0
- arpack=3.7.0=hefb7bc6_2
- blas=2.116=openblas
- blas-devel=3.9.0=16_osx64_openblas
- boost=1.72.0=py39hb64e6f8_1
- boost-cpp=1.72.0=h179ae3a_7
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h0d85af4_4
- c-ares=1.18.1=h0d85af4_0
- ca-certificates=2022.12.7=h033912b_0
- cairo=1.16.0=h904041c_1014
- cctools_osx-64=973.0.1=hcc6d90d_11
- ceres-solver=1.14.0=h636452b_15
- clang=14.0.6=h694c41f_0
- clang-14=14.0.6=default_h55ffa42_0
- clang_osx-64=14.0.6=h3113cd8_4
- clangxx=14.0.6=default_h55ffa42_0
- cmake=3.15.5=h6c18c4b_0
- compiler-rt=14.0.6=h613da45_0
- compiler-rt_osx-64=14.0.6=h8d5cb93_0
- csm=3.0.3.3=0
- cspice=67=hb7f2c08_4
- curl=7.86.0=h57eb407_1
- cyrus-sasl=2.1.27=ha724b88_5
- eigen=3.4.0=h940c156_0
- embree=2.16.0=h6834224_0
- expat=2.5.0=hf0c8a7f_0
- ffmpeg=4.4.2=gpl_h5a1d76f_107
- fftw=3.3.10=nompi_h4fa670e_106
- fgr=isis7=h01edc0c_0
- flann=1.9.1=h56de9e4_1011
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.14.1=h5bb23bf_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freetype=2.12.1=h3f81eb7_1
- geoid=1.0_isis7=1
- geos=3.7.3=h4a8c4bd_0
- geotiff=1.7.1=he29fd1c_4
- gettext=0.21.1=h8a4c099_0
- gflags=2.2.2=hb1e8313_1004
- gfortran_impl_osx-64=11.3.0=h1f927f5_27
- gfortran_osx-64=11.3.0=h18f7dce_0
- giflib=5.2.1=hbcb3906_2
- glib=2.74.1=hbc0c0cd_1
- glib-tools=2.74.1=hbc0c0cd_1
- glog=0.6.0=h8ac2a54_0
- gmp=6.2.1=h2e338ed_0
- gnutls=3.7.8=h207c4f0_0
- graphite2=1.3.13=h2e338ed_1001
- gsl=2.7=h93259b0_0
- gst-plugins-base=1.20.3=h37e1711_2
- gstreamer=1.20.3=h1d18e73_2
- harfbuzz=5.3.0=h08f8713_0
- hdf5=1.12.2=nompi_hc782337_100
- htdp=1.0_isis7=1
- icu=70.1=h96cf925_0
- ilmbase=2.5.5=hfab91a5_0
- inja=3.3.0=he49afe7_0
- isis=7.1.0=0
- isl=0.25=hb486fe8_0
- jama=125=0
- jasper=2.0.33=h013e400_0
- jemalloc=5.2.1=he49afe7_6
- jpeg=9e=hac89ed1_2
- kakadu=1=0
- krb5=1.19.3=hb49756b_0
- lame=3.100=hb7f2c08_1003
- laszip=2.1.0_isis7=h01edc0c_1
- ld64_osx-64=609=hfd63004_11
- lerc=4.0.0=hb486fe8_0
- libblas=3.9.0=16_osx64_openblas
- libcblas=3.9.0=16_osx64_openblas
- libclang=14.0.6=default_h55ffa42_0
- libclang-cpp14=14.0.6=default_h55ffa42_0
- libclang13=14.0.6=default_hb5731bd_0
- libcurl=7.86.0=h57eb407_1
- libcxx=14.0.6=hccf4f1f_0
- libdeflate=1.14=hb7f2c08_0
- libedit=3.1.20191231=h0678c8f_2
- libelas=isis7=h01edc0c_0
- libev=4.33=haf1e3a3_1
- libevent=2.1.10=h815e4d9_4
- libffi=3.4.2=h0d85af4_5
- libgdal=3.5_isis7=h01edc0c_0
- libgfortran=5.0.0=9_5_0_h97931a8_26
- libgfortran-devel_osx-64=11.3.0=h824d247_27
- libgfortran5=11.3.0=h082f757_26
- libglib=2.74.1=h4c723e1_1
- libiconv=1.17=hac89ed1_0
- libidn2=2.3.4=hb7f2c08_0
- libjemalloc=5.2.1=he49afe7_6
- liblapack=3.9.0=16_osx64_openblas
- liblapacke=3.9.0=16_osx64_openblas
- liblas=1.8.2_isis7=h01edc0c_0
- libllvm14=14.0.6=h5b596cc_1
- libnabo=isis7=h01edc0c_0
- libnghttp2=1.47.0=h7cbc4dc_1
- libntlm=1.4=h0d85af4_1002
- libogg=1.3.4=h35c211d_1
- libopenblas=0.3.21=openmp_h429af6e_3
- libopencv=4.6.0=py39h743a0d3_4
- libopus=1.3.1=hc929b4f_1
- libpng=1.6.39=ha978bb4_0
- libpointmatcher=isis7=ha5a8b8e_0
- libpq=14.5=h4aa9af9_3
- libprotobuf=3.21.11=hbc0c0cd_0
- libsqlite=3.40.0=ha978bb4_0
- libssh2=1.10.0=h7535e13_3
- libtasn1=4.19.0=hb7f2c08_0
- libtiff=4.4.0=hdb44e8a_4
- libunistring=0.9.10=h0d85af4_0
- libuv=1.44.2=hac89ed1_0
- libvorbis=1.3.7=h046ec9c_0
- libvpx=1.11.0=he49afe7_3
- libwebp=1.2.4=hfa4350a_0
- libwebp-base=1.2.4=h775f41a_0
- libxcb=1.13=h0d85af4_1004
- libxml2=2.9.14=hea49891_4
- libzlib=1.2.13=hfd90126_4
- llvm-openmp=15.0.6=h61d9ccf_0
- llvm-tools=14.0.6=h5b596cc_1
- lz4-c=1.9.3=he49afe7_1
- macports-legacy-support=1.0.7=hb7f2c08_0
- mesalib=21.2.5=h2df1e00_3
- metis=5.1.0=h2e338ed_1006
- mpc=1.2.1=hbb51d92_0
- mpfr=4.1.0=h0f52abe_1
- multiview=asp3.2.0=py39h01edc0c_0
- mysql=8.0.31=h57ddcff_0
- mysql-client=8.0.31=hbbbc359_0
- mysql-common=8.0.31=h7ebae80_0
- mysql-connector-c=6.1.11=h0f02589_1007
- mysql-devel=8.0.31=h7ebae80_0
- mysql-libs=8.0.31=hc37e033_0
- mysql-server=8.0.31=ha134c4c_0
- nanoflann=1.4.2=h694c41f_0
- ncurses=6.3=h96cf925_1
- nettle=3.8.1=h96f3785_1
- networkx=2.8.8=pyhd8ed1ab_0
- nlohmann_json=3.11.2=hbbd2c75_0
- nn=1.86.0=h1de35cc_2003
- nspr=4.35=hea0b92c_0
- nss=3.78=ha8197d3_0
- numpy=1.23.5=py39hdfa1d0c_0
- openblas=0.3.21=openmp_hbefa662_3
- opencv=4.6.0=py39h6e9494a_4
- openexr=2.5.5=h7fa7ffa_0
- openh264=2.3.0=hb486fe8_0
- openjpeg=2.3.0=h3bf0609_1003
- openssl=1.1.1s=hfd90126_1
- p11-kit=0.24.1=h65f8906_0
- parallel=20221122=h694c41f_0
- pbzip2=1.1.13=h9d27c22_1
- pcl=1.11.1=h7984e4d_1
- pcre2=10.40=h1c4e4bc_0
- perl=5.32.1=2_h0d85af4_perl5
- pip=22.3.1=pyhd8ed1ab_0
- pixman=0.40.0=hbcb3906_0
- proj=9.1.0=hcbd9701_0
- protobuf=4.21.11=py39h7a8716b_0
- pthread-stubs=0.4=hc929b4f_1001
- pvl=1.3.2=pyhd8ed1ab_0
- py-opencv=4.6.0=py39h71a6800_4
- python=3.9.15=h531fd05_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.9=3_cp39
- pytz=2022.6=pyhd8ed1ab_0
- pyyaml=6.0=py39ha30fb19_5
- qhull=2020.2=h940c156_2
- qt=5.15.4=hb3ad848_0
- qt-main=5.15.4=h938c29d_2
- qt-webengine=5.15.4=h72ca1e5_3
- qwt=6.2.0=h4cc5820_4
- rapidjson=1.1.0=hb1e8313_1002
- readline=8.1.2=h3899abd_0
- rhash=1.4.3=hac89ed1_0
- rocksdb=6.13.3=hbb73eaa_2
- s2p-subset=isis7=h01edc0c_0
- scipy=1.9.3=py39h8a15683_2
- setuptools=65.5.1=pyhd8ed1ab_0
- sigtool=0.1.3=h57ddcff_0
- six=1.16.0=pyh6c4a22f_0
- snappy=1.1.9=h225ccf5_2
- spiceypy=5.1.2=pyhd8ed1ab_0
- sqlite=3.40.0=h9ae0607_0
- suitesparse=5.10.1=h7aff33d_1
- superlu=5.2.2=h1f0f902_0
- svt-av1=1.2.1=hbbd2c75_0
- tapi=1100.0.11=h9ce4665_0
- tbb=2021.7.0=hb8565cd_1
- tbb-devel=2021.7.0=hb8565cd_1
- tk=8.6.12=h5dbffcc_0
- tnt=126=0
- tzdata=2022g=h191b570_0
- usgscsm=1.6.0=hb8565cd_1
- wheel=0.38.4=pyhd8ed1ab_0
- x264=1!164.3095=h775f41a_2
- x265=3.5=hbb4e6a2_3
- xerces-c=3.2.3=hf5b2a72_5
- xorg-damageproto=1.2.1=h0d85af4_1002
- xorg-fixesproto=5.0=h0d85af4_1002
- xorg-glproto=1.4.17=h0d85af4_1002
- xorg-inputproto=2.3.2=h35c211d_1002
- xorg-kbproto=1.0.7=h35c211d_1002
- xorg-libice=1.0.10=h0d85af4_0
- xorg-libsm=1.2.3=h0d85af4_1000
- xorg-libx11=1.7.2=h0d85af4_0
- xorg-libxau=1.0.9=h35c211d_0
- xorg-libxdamage=1.1.5=h0d85af4_1
- xorg-libxdmcp=1.1.3=h35c211d_0
- xorg-libxext=1.3.4=h0d85af4_1
- xorg-libxfixes=5.0.3=h0d85af4_1004
- xorg-libxi=1.7.10=h0d85af4_0
- xorg-libxrandr=1.5.2=h0d85af4_1
- xorg-libxrender=0.9.10=h0d85af4_1003
- xorg-randrproto=1.5.0=h0d85af4_1001
- xorg-renderproto=0.11.1=h0d85af4_1002
- xorg-util-macros=1.19.3=h35c211d_0
- xorg-xextproto=7.3.0=h35c211d_1002
- xorg-xf86vidmodeproto=2.3.1=h0d85af4_1002
- xorg-xproto=7.0.31=h35c211d_1007
- xz=5.2.6=h775f41a_0
- yaml=0.2.5=h0d85af4_2
- zlib=1.2.13=hfd90126_4
- zstd=1.5.2=hfa58983_4
================================================
FILE: conda/asp_deps_3.3.0_linux_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_kmp_llvm
- _sysroot_linux-64_curr_repodata_hack=3=h69a702a_13
- ale=0.9.1=py39h7633fee_0
- alsa-lib=1.2.7.2=h166bdaf_0
- aom=3.5.0=h27087fc_0
- armadillo=12.6.1=h0a193a4_0
- arpack=3.7.0=hdefa2d7_2
- blas=2.117=openblas
- blas-devel=3.9.0=17_linux64_openblas
- boost=1.72.0=py39ha90915f_1
- boost-cpp=1.72.0=h359cf19_6
- brotli-python=1.1.0=py39h3d6467e_0
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h7f98852_4
- c-ares=1.19.1=hd590300_0
- ca-certificates=2023.7.22=hbcca054_0
- cairo=1.16.0=ha12eb4b_1010
- ceres-solver=1.14.0=hf302a74_15
- certifi=2023.7.22=pyhd8ed1ab_0
- charset-normalizer=3.2.0=pyhd8ed1ab_0
- chrpath=0.16=h7f98852_1002
- csm=3.0.3.3=hc9558a2_0
- cspice=67=h166bdaf_4
- curl=7.87.0=h6312ad2_0
- cyrus-sasl=2.1.27=h957375c_6
- dbus=1.13.6=h5008d03_3
- eigen=3.4.0=h00ab1b0_0
- elfutils=0.188=hbb17bd0_0
- embree=2.17.7=ha770c72_3
- expat=2.5.0=hcb278e6_1
- ffmpeg=4.4.2=gpl_hbd009f3_109
- fftw=3.3.10=nompi_hc118613_108
- fgr=isis7=h3fd9d12_0
- flann=1.9.1=hfe772e8_1010
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.14.2=h14ed4e7_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- freeglut=3.2.2=h9c3ff4c_1
- freetype=2.12.1=hca18f0e_1
- geoid=1.0_isis7=1
- geos=3.9.1=h9c3ff4c_2
- geotiff=1.7.1=ha76d385_4
- gettext=0.21.1=h27087fc_0
- gflags=2.2.2=he1b5a44_1004
- giflib=5.2.1=h0b41bf4_3
- glib=2.76.4=hfc55251_0
- glib-tools=2.76.4=hfc55251_0
- glog=0.6.0=h6f12383_0
- gmp=6.2.1=h58526e2_0
- gnutls=3.7.8=hf3e180e_0
- graphite2=1.3.13=h58526e2_1001
- gsl=2.6=he838d99_2
- gst-plugins-base=1.20.3=h57caac4_2
- gstreamer=1.20.3=hd4edc92_2
- harfbuzz=4.2.0=h40b6f09_0
- hdf5=1.12.1=nompi_h2386368_104
- htdp=1.0_isis7=1
- icu=69.1=h9c3ff4c_0
- idna=3.4=pyhd8ed1ab_0
- ilmbase=2.5.5=h780b84a_0
- inja=3.3.0=h9c3ff4c_0
- isis=8.0.0=np125_0
- jama=125=0
- jasper=2.0.33=h0ff4b12_1
- jemalloc=5.3.0=hcb278e6_0
- jpeg=9e=h0b41bf4_3
- kakadu=1=0
- kernel-headers_linux-64=3.10.0=h4a8ded7_13
- keyutils=1.6.1=h166bdaf_0
- krb5=1.20.1=hf9c8cef_0
- lame=3.100=h166bdaf_1003
- laszip=2.1.0_isis7=h3fd9d12_1
- ld_impl_linux-64=2.40=h41732ed_0
- lerc=4.0.0=h27087fc_0
- libarchive=3.5.2=hb890918_3
- libblas=3.9.0=17_linux64_openblas
- libcblas=3.9.0=17_linux64_openblas
- libclang=13.0.1=default_h7634d5b_3
- libcurl=7.87.0=h6312ad2_0
- libcxx=16.0.6=h00ab1b0_0
- libcxxabi=16.0.6=ha770c72_0
- libdeflate=1.14=h166bdaf_0
- libdrm=2.4.114=h166bdaf_0
- libdrm-cos6-x86_64=2.4.65=4
- libedit=3.1.20191231=he28a2e2_2
- libelas=isis7=h3fd9d12_0
- libev=4.33=h516909a_1
- libevent=2.1.10=h9b69904_4
- libexpat=2.5.0=hcb278e6_1
- libffi=3.4.2=h7f98852_5
- libgcc-ng=13.1.0=he5830b7_0
- libgdal=3.5_isis8=h3fd9d12_0
- libgfortran-ng=13.1.0=h69a702a_0
- libgfortran5=13.1.0=h15d22d2_0
- libglib=2.76.4=hebfc3b9_0
- libglu=9.0.0=he1b5a44_1001
- libglvnd-cos7-x86_64=1.0.1=h9b0a68f_1105
- libhwloc=2.8.0=h32351e8_1
- libiconv=1.17=h166bdaf_0
- libidn2=2.3.4=h166bdaf_0
- libjemalloc=5.3.0=hcb278e6_0
- liblapack=3.9.0=17_linux64_openblas
- liblapacke=3.9.0=17_linux64_openblas
- liblas=1.8.2_isis8=h3fd9d12_0
- libllvm13=13.0.1=hf817b99_2
- libmicrohttpd=0.9.77=h97afed2_0
- libnabo=isis7=h3fd9d12_0
- libnghttp2=1.51.0=hdcd2b5c_0
- libnsl=2.0.0=h7f98852_0
- libntlm=1.4=h7f98852_1002
- libogg=1.3.4=h7f98852_1
- libopenblas=0.3.23=pthreads_h80387f5_0
- libopencv=4.5.5=py39hb0e02d1_7
- libopus=1.3.1=h7f98852_1
- libpciaccess=0.17=h166bdaf_0
- libpng=1.6.39=h753d276_0
- libpointmatcher=isis7=h2bc3f7f_0
- libpq=14.5=h2baec63_5
- libprotobuf=3.19.6=h3eb15da_0
- libsqlite=3.43.0=h2797004_0
- libssh2=1.10.0=haa6b8db_3
- libstdcxx-ng=13.1.0=hfd8a6a1_0
- libtasn1=4.19.0=h166bdaf_0
- libtiff=4.4.0=h82bc61c_5
- libunistring=0.9.10=h7f98852_0
- libuuid=2.38.1=h0b41bf4_0
- libva=2.18.0=h0b41bf4_0
- libvorbis=1.3.7=h9c3ff4c_0
- libvpx=1.11.0=h9c3ff4c_3
- libwebp-base=1.3.1=hd590300_0
- libx11-common-cos6-x86_64=1.6.4=4
- libx11-cos6-x86_64=1.6.4=4
- libxcb=1.13=h7f98852_1004
- libxkbcommon=1.0.3=he3ba5ed_0
- libxml2=2.9.14=haae042b_4
- libzlib=1.2.13=hd590300_5
- llvm-openmp=16.0.6=h4dfa4b3_0
- lz4-c=1.9.3=h9c3ff4c_1
- lzo=2.10=h516909a_1000
- mesa-libgl-cos6-x86_64=11.0.7=4
- mesalib=23.0.0=h0fe20ba_0
- metis=5.1.0=h59595ed_1007
- mpfr=4.2.0=hb012696_0
- multiview=isis8=py39h3fd9d12_0
- mysql=8.0.28=h3e2b116_2
- mysql-client=8.0.28=hf89ab62_2
- mysql-common=8.0.28=haf5c9bc_2
- mysql-connector-c=6.1.11=h6eb9d5d_1007
- mysql-devel=8.0.28=haf5c9bc_2
- mysql-libs=8.0.28=h28c427c_2
- mysql-server=8.0.28=hb253900_2
- nanoflann=1.4.2=ha770c72_0
- ncurses=6.4=hcb278e6_0
- nettle=3.8.1=hc379101_1
- networkx=3.1=pyhd8ed1ab_0
- nlohmann_json=3.11.2=h27087fc_0
- nn=1.86.0=hd590300_2003
- nspr=4.35=h27087fc_0
- nss=3.92=h1d7d5a4_0
- numpy=1.25.2=py39h6183b62_0
- openblas=0.3.23=pthreads_h855a84d_0
- opencv=4.5.5=py39hf3d152e_7
- openexr=2.5.5=hf817b99_0
- openh264=2.3.1=hcb278e6_2
- openjpeg=2.3.0=hf38bd82_1003
- openssl=1.1.1v=hd590300_0
- p11-kit=0.24.1=hc5aa10d_0
- packaging=23.1=pyhd8ed1ab_0
- parallel=20230722=ha770c72_0
- pbzip2=1.1.13=0
- pcl=1.11.1=h05311af_1
- pcre2=10.40=hc3806b6_0
- perl=5.32.1=4_hd590300_perl5
- pip=23.2.1=pyhd8ed1ab_0
- pixman=0.40.0=h36c2ea0_0
- platformdirs=3.10.0=pyhd8ed1ab_0
- pooch=1.7.0=pyha770c72_3
- proj=9.1.0=h93bde94_0
- protobuf=3.19.6=py39h227be39_0
- pthread-stubs=0.4=h36c2ea0_1001
- pvl=1.3.2=pyhd8ed1ab_0
- py-opencv=4.5.5=py39hef51801_7
- pysocks=1.7.1=pyha2e5f31_6
- python=3.9.15=h47a2c10_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.9=3_cp39
- pytz=2023.3.post1=pyhd8ed1ab_0
- pyyaml=6.0.1=py39hd1e30aa_0
- qhull=2020.2=h4bd325d_2
- qt=5.12.9=h1304e3e_6
- qwt=6.2.0=hb19a904_2
- rapidjson=1.1.0=he1b5a44_1002
- rclone=1.63.1=h519d9b9_0
- readline=8.2=h8228510_1
- requests=2.31.0=pyhd8ed1ab_0
- rocksdb=6.13.3=hda8cf21_2
- s2p-subset=isis7=h3fd9d12_0
- scipy=1.11.2=py39h6183b62_0
- setuptools=68.1.2=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- spiceypy=6.0.0=pyhd8ed1ab_0
- sqlite=3.43.0=h2c6b66d_0
- suitesparse=5.10.1=h9e50725_1
- superlu=5.2.2=h00795ac_0
- svt-av1=1.3.0=h27087fc_0
- sysroot_linux-64=2.17=h4a8ded7_13
- tbb=2021.7.0=h924138e_1
- tbb-devel=2021.7.0=h924138e_1
- tk=8.6.12=h27826a3_0
- tnt=126=0
- typing-extensions=4.7.1=hd8ed1ab_0
- typing_extensions=4.7.1=pyha770c72_0
- tzdata=2023c=h71feb2d_0
- urllib3=2.0.4=pyhd8ed1ab_0
- usgscsm=1.7.0=h00ab1b0_1
- wheel=0.41.2=pyhd8ed1ab_0
- x264=1!164.3095=h166bdaf_2
- x265=3.5=h924138e_3
- xerces-c=3.2.3=h8ce2273_4
- xorg-damageproto=1.2.1=h7f98852_1002
- xorg-fixesproto=5.0=h7f98852_1002
- xorg-glproto=1.4.17=h7f98852_1002
- xorg-inputproto=2.3.2=h7f98852_1002
- xorg-kbproto=1.0.7=h7f98852_1002
- xorg-libice=1.1.1=hd590300_0
- xorg-libsm=1.2.4=h7391055_0
- xorg-libx11=1.8.4=h0b41bf4_0
- xorg-libxau=1.0.11=hd590300_0
- xorg-libxdamage=1.1.5=h7f98852_1
- xorg-libxdmcp=1.1.3=h7f98852_0
- xorg-libxext=1.3.4=h0b41bf4_2
- xorg-libxfixes=5.0.3=h7f98852_1004
- xorg-libxi=1.7.10=h7f98852_0
- xorg-libxrandr=1.5.2=h7f98852_1
- xorg-libxrender=0.9.10=h7f98852_1003
- xorg-randrproto=1.5.0=h7f98852_1001
- xorg-renderproto=0.11.1=h7f98852_1002
- xorg-util-macros=1.19.3=h7f98852_0
- xorg-xextproto=7.3.0=h0b41bf4_1003
- xorg-xf86vidmodeproto=2.3.1=h7f98852_1002
- xorg-xproto=7.0.31=h7f98852_1007
- xz=5.2.6=h166bdaf_0
- yaml=0.2.5=h7f98852_2
- zlib=1.2.13=hd590300_5
- zstd=1.5.5=hfc55251_0
================================================
FILE: conda/asp_deps_3.3.0_osx_env.yaml
================================================
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
- defaults
dependencies:
- ale=0.9.1=py39h8ee36c8_0
- aom=3.5.0=hf0c8a7f_0
- armadillo=12.6.1=hdc495e4_0
- arpack=3.7.0=hefb7bc6_2
- blas=2.117=openblas
- blas-devel=3.9.0=17_osx64_openblas
- boost=1.72.0=py39hb64e6f8_1
- boost-cpp=1.72.0=hf3dc895_6
- brotli-python=1.1.0=py39h840bb9f_0
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h0d85af4_4
- c-ares=1.19.1=h0dc2134_0
- c-compiler=1.5.2=hbf74d83_0
- ca-certificates=2023.7.22=h8857fd0_0
- cairo=1.16.0=h9e0e54b_1010
- cctools=973.0.1=h76f1dac_13
- cctools_osx-64=973.0.1=hcc6d90d_13
- ceres-solver=1.14.0=h636452b_15
- certifi=2023.7.22=pyhd8ed1ab_0
- charset-normalizer=3.2.0=pyhd8ed1ab_0
- clang=14.0.6=h694c41f_1
- clang-14=14.0.6=default_hdb78580_1
- clang_osx-64=14.0.6=h3113cd8_6
- clangxx=14.0.6=default_hdb78580_1
- clangxx_osx-64=14.0.6=h6f97653_6
- cmake=3.27.4=hf40c264_4
- compiler-rt=14.0.6=h613da45_0
- compiler-rt_osx-64=14.0.6=hab78ec2_0
- compilers=1.5.2=h694c41f_0
- csm=3.0.3.3=0
- cspice=67=hb7f2c08_4
- curl=8.2.1=h5f667d7_0
- cxx-compiler=1.5.2=hb8565cd_0
- cyrus-sasl=2.1.27=hf9bab2b_7
- eigen=3.4.0=h1c7c39f_0
- embree=2.17.7=h694c41f_3
- expat=2.5.0=hf0c8a7f_1
- ffmpeg=4.4.2=gpl_hff0bab5_109
- fftw=3.3.10=nompi_h4fa670e_108
- fgr=isis7=h01edc0c_0
- flann=1.9.1=h30321d8_1010
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.14.2=h5bb23bf_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- fortran-compiler=1.5.2=haad3a49_0
- freetype=2.12.1=h3f81eb7_1
- geoid=1.0_isis7=1
- geos=3.9.1=he49afe7_2
- geotiff=1.7.1=he29fd1c_4
- gettext=0.21.1=h8a4c099_0
- gflags=2.2.2=hb1e8313_1004
- gfortran=11.4.0=h2c809b3_1
- gfortran_impl_osx-64=11.4.0=h2a33dde_1
- gfortran_osx-64=11.4.0=h18f7dce_1
- giflib=5.2.1=hb7f2c08_3
- glog=0.6.0=h8ac2a54_0
- gmp=6.2.1=h2e338ed_0
- gnutls=3.7.8=h207c4f0_0
- graphite2=1.3.13=h2e338ed_1001
- gsl=2.6=h71c5fe9_2
- harfbuzz=4.2.0=h48644e2_0
- hdf5=1.12.1=nompi_h0aa1fa2_104
- htdp=1.0_isis7=1
- icu=69.1=he49afe7_0
- idna=3.4=pyhd8ed1ab_0
- ilmbase=2.5.5=hfab91a5_0
- inja=3.3.0=he49afe7_0
- isis=8.0.0=np125_0
- isl=0.25=hb486fe8_0
- jama=125=0
- jasper=2.0.33=h7c6fec8_1
- jemalloc=5.3.0=hf0c8a7f_0
- jpeg=9e=hb7f2c08_3
- kakadu=1=0
- krb5=1.21.1=hb884880_0
- lame=3.100=hb7f2c08_1003
- laszip=2.1.0_isis7=h01edc0c_1
- ld64=609=hc6ad406_13
- ld64_osx-64=609=hfd63004_13
- lerc=4.0.0=hb486fe8_0
- libblas=3.9.0=17_osx64_openblas
- libcblas=3.9.0=17_osx64_openblas
- libclang=13.0.1=root_62804_h2961583_3
- libclang-cpp14=14.0.6=default_hdb78580_1
- libcurl=8.2.1=h5f667d7_0
- libcxx=16.0.6=hd57cbcb_0
- libdeflate=1.14=hb7f2c08_0
- libedit=3.1.20191231=h0678c8f_2
- libelas=isis7=h01edc0c_0
- libev=4.33=haf1e3a3_1
- libevent=2.1.10=h7d65743_4
- libexpat=2.5.0=hf0c8a7f_1
- libffi=3.4.2=h0d85af4_5
- libgdal=3.5_isis8=hf8dc8b4_0
- libgfortran=5.0.0=12_3_0_h97931a8_1
- libgfortran-devel_osx-64=11.4.0=h01aa347_1
- libgfortran5=12.3.0=hbd3c1fe_1
- libglib=2.76.4=hc62aa5d_0
- libiconv=1.17=hac89ed1_0
- libidn2=2.3.4=hb7f2c08_0
- libjemalloc=5.3.0=hf0c8a7f_0
- liblapack=3.9.0=17_osx64_openblas
- liblapacke=3.9.0=17_osx64_openblas
- liblas=1.8.2_isis8=hf8dc8b4_0
- libllvm13=13.0.1=h64f94b2_2
- libllvm14=14.0.6=hc8e404f_4
- libnabo=isis7=h01edc0c_0
- libnghttp2=1.52.0=he2ab024_0
- libntlm=1.4=h0d85af4_1002
- libopenblas=0.3.23=openmp_h429af6e_0
- libopencv=4.5.5=py39hc2bf5a6_7
- libpng=1.6.39=ha978bb4_0
- libpointmatcher=isis7=ha5a8b8e_0
- libpq=14.5=h3df487d_7
- libprotobuf=3.19.6=hbc0c0cd_0
- libsqlite=3.42.0=h58db7d2_0
- libssh2=1.11.0=hd019ec5_0
- libtasn1=4.19.0=hb7f2c08_0
- libtiff=4.4.0=h6268bbc_5
- libunistring=0.9.10=h0d85af4_0
- libuv=1.46.0=h0c2f820_0
- libvpx=1.11.0=he49afe7_3
- libwebp-base=1.3.1=h0dc2134_0
- libxcb=1.15=hb7f2c08_0
- libxml2=2.9.14=h1faee8b_4
- libzlib=1.2.13=h8a1eda9_5
- llvm-openmp=16.0.6=hff08bdf_0
- llvm-tools=14.0.6=hc8e404f_4
- lz4-c=1.9.3=he49afe7_1
- macports-legacy-support=1.0.13=h0dc2134_0
- mesalib=23.1.4=hb59017c_0
- metis=5.1.0=he965462_1007
- mpc=1.3.1=h81bd1dd_0
- mpfr=4.2.0=h4f9bd69_0
- multiview=isis8=py39hf8dc8b4_0
- mysql=8.0.28=h88f4db0_2
- mysql-client=8.0.28=h7ddd48c_2
- mysql-common=8.0.28=hdd8d184_2
- mysql-devel=8.0.28=hdd8d184_2
- mysql-libs=8.0.28=h353f102_2
- mysql-server=8.0.28=h6edde1b_2
- nanoflann=1.4.2=h694c41f_0
- ncurses=6.4=hf0c8a7f_0
- nettle=3.8.1=h96f3785_1
- networkx=3.1=pyhd8ed1ab_0
- nlohmann_json=3.11.2=hbbd2c75_0
- nn=1.86.0=h0dc2134_2003
- nspr=4.35=hea0b92c_0
- nss=3.92=hd6ac835_0
- numpy=1.25.2=py39h892e69a_0
- openblas=0.3.23=openmp_hbefa662_0
- opencv=4.5.5=py39h6e9494a_7
- openexr=2.5.5=h7fa7ffa_0
- openh264=2.3.1=hf0c8a7f_2
- openjpeg=2.3.0=h3bf0609_1003
- openssl=3.1.2=h8a1eda9_0
- p11-kit=0.24.1=h65f8906_0
- packaging=23.1=pyhd8ed1ab_0
- parallel=20230722=h694c41f_0
- pbzip2=1.1.13=h9d27c22_1
- pcl=1.11.1=h7984e4d_1
- pcre2=10.40=h1c4e4bc_0
- perl=5.32.1=4_h0dc2134_perl5
- pip=23.2.1=pyhd8ed1ab_0
- pixman=0.40.0=hbcb3906_0
- platformdirs=3.10.0=pyhd8ed1ab_0
- pooch=1.7.0=pyha770c72_3
- proj=9.1.0=hcbd9701_0
- protobuf=3.19.6=py39h7a8716b_0
- pthread-stubs=0.4=hc929b4f_1001
- pvl=1.3.2=pyhd8ed1ab_0
- py-opencv=4.5.5=py39h71a6800_7
- pysocks=1.7.1=pyha2e5f31_6
- python=3.9.17=h07e1443_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.9=3_cp39
- pytz=2023.3.post1=pyhd8ed1ab_0
- pyyaml=6.0.1=py39hdc70f33_0
- qhull=2020.2=h940c156_2
- qt=5.12.9=h2a607e2_5
- qwt=6.2.0=h0f3c874_2
- rapidjson=1.1.0=hb1e8313_1002
- rclone=1.63.1=h1762f63_0
- readline=8.2=h9e318b2_1
- requests=2.31.0=pyhd8ed1ab_0
- rhash=1.4.4=h0dc2134_0
- rocksdb=6.13.3=hbb73eaa_2
- s2p-subset=isis7=h01edc0c_0
- scipy=1.11.2=py39hded996c_0
- setuptools=68.1.2=pyhd8ed1ab_0
- sigtool=0.1.3=h88f4db0_0
- six=1.16.0=pyh6c4a22f_0
- snappy=1.1.10=h225ccf5_0
- spiceypy=6.0.0=pyhd8ed1ab_0
- sqlite=3.42.0=h2b0dec6_0
- suitesparse=5.10.1=h7aff33d_1
- superlu=5.2.2=h1f0f902_0
- svt-av1=1.3.0=hf0c8a7f_0
- tapi=1100.0.11=h9ce4665_0
- tbb=2021.10.0.custom_asp=h6b95b14_0
- tbb-devel=2021.10.0.custom_asp=h6b95b14_0
- tk=8.6.12=h5dbffcc_0
- tnt=126=0
- typing-extensions=4.7.1=hd8ed1ab_0
- typing_extensions=4.7.1=pyha770c72_0
- tzdata=2023c=h71feb2d_0
- urllib3=2.0.4=pyhd8ed1ab_0
- usgscsm=1.7.0=h1c7c39f_1
- wheel=0.41.2=pyhd8ed1ab_0
- x264=1!164.3095=h775f41a_2
- x265=3.5=hbb4e6a2_3
- xerces-c=3.2.3=h6564042_4
- xorg-damageproto=1.2.1=h0d85af4_1002
- xorg-fixesproto=5.0=h0d85af4_1002
- xorg-glproto=1.4.17=h0d85af4_1002
- xorg-inputproto=2.3.2=h35c211d_1002
- xorg-kbproto=1.0.7=h35c211d_1002
- xorg-libice=1.1.1=h0dc2134_0
- xorg-libsm=1.2.4=h0dc2134_0
- xorg-libx11=1.8.6=hbd0b022_0
- xorg-libxau=1.0.11=h0dc2134_0
- xorg-libxdamage=1.1.5=h0d85af4_1
- xorg-libxdmcp=1.1.3=h35c211d_0
- xorg-libxext=1.3.4=hb7f2c08_2
- xorg-libxfixes=5.0.3=h0d85af4_1004
- xorg-libxi=1.7.10=h0d85af4_0
- xorg-libxrandr=1.5.2=h0d85af4_1
- xorg-libxrender=0.9.11=h0dc2134_0
- xorg-randrproto=1.5.0=h0d85af4_1001
- xorg-renderproto=0.11.1=h0d85af4_1002
- xorg-util-macros=1.19.3=h35c211d_0
- xorg-xextproto=7.3.0=hb7f2c08_1003
- xorg-xf86vidmodeproto=2.3.1=h0d85af4_1002
- xorg-xproto=7.0.31=h35c211d_1007
- xz=5.2.6=h775f41a_0
- yaml=0.2.5=h0d85af4_2
- zlib=1.2.13=h8a1eda9_5
- zstd=1.5.5=h829000d_0
================================================
FILE: conda/asp_deps_3.4.0_alpha_linux_env.yaml
================================================
name: asp_deps_3.4.0_alpha
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
dependencies:
- _libgcc_mutex=0.1=conda_forge
- _openmp_mutex=4.5=2_kmp_llvm
- _sysroot_linux-64_curr_repodata_hack=3=h69a702a_13
- ale=0.9.1=py39h7633fee_1
- alsa-lib=1.2.7.2=h166bdaf_0
- aom=3.5.0=h27087fc_0
- armadillo=12.6.1=h0a193a4_0
- arpack=3.7.0=hdefa2d7_2
- binutils=2.40=hdd6e379_0
- binutils_impl_linux-64=2.40=hf600244_0
- binutils_linux-64=2.40=hbdbef99_1
- blas=2.117=openblas
- blas-devel=3.9.0=17_linux64_openblas
- boost=1.72.0=py39ha90915f_1
- boost-cpp=1.72.0=h359cf19_6
- brotli-python=1.1.0=py39h3d6467e_0
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h7f98852_4
- c-ares=1.19.1=hd590300_0
- c-compiler=1.6.0=hd590300_0
- ca-certificates=2023.7.22=hbcca054_0
- cairo=1.16.0=ha12eb4b_1010
- ceres-solver=1.14.0=hf302a74_15
- certifi=2023.7.22=pyhd8ed1ab_0
- charset-normalizer=3.2.0=pyhd8ed1ab_0
- chrpath=0.16=h7f98852_1002
- cmake=3.25.2=h077f3f9_0
- compilers=1.6.0=ha770c72_0
- csm=3.0.3.3=hc9558a2_0
- cspice=67=h166bdaf_4
- curl=7.87.0=h6312ad2_0
- cxx-compiler=1.6.0=h00ab1b0_0
- cyrus-sasl=2.1.27=h957375c_6
- dbus=1.13.6=h5008d03_3
- eigen=3.4.0=h00ab1b0_0
- elfutils=0.188=hbb17bd0_0
- embree=2.17.7=ha770c72_3
- expat=2.5.0=hcb278e6_1
- ffmpeg=4.4.2=gpl_hbd009f3_109
- fftw=3.3.10=nompi_hc118613_108
- fgr=isis7=h3fd9d12_0
- flann=1.9.1=hfe772e8_1010
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.14.2=h14ed4e7_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- fortran-compiler=1.6.0=heb67821_0
- freeglut=3.2.2=h9c3ff4c_1
- freetype=2.12.1=hca18f0e_1
- gcc=12.3.0=h8d2909c_1
- gcc_impl_linux-64=12.3.0=he2b93b0_0
- gcc_linux-64=12.3.0=h76fc315_1
- geoid=1.0_isis7=1
- geos=3.9.1=h9c3ff4c_2
- geotiff=1.7.1=ha76d385_4
- gettext=0.21.1=h27087fc_0
- gflags=2.2.2=he1b5a44_1004
- gfortran=12.3.0=h499e0f7_1
- gfortran_impl_linux-64=12.3.0=hfcedea8_0
- gfortran_linux-64=12.3.0=h7fe76b4_1
- giflib=5.2.1=h0b41bf4_3
- glib=2.76.4=hfc55251_0
- glib-tools=2.76.4=hfc55251_0
- glog=0.6.0=h6f12383_0
- gmp=6.2.1=h58526e2_0
- gnutls=3.7.8=hf3e180e_0
- graphite2=1.3.13=h58526e2_1001
- gsl=2.6=he838d99_2
- gst-plugins-base=1.20.3=h57caac4_2
- gstreamer=1.20.3=hd4edc92_2
- gxx=12.3.0=h8d2909c_1
- gxx_impl_linux-64=12.3.0=he2b93b0_0
- gxx_linux-64=12.3.0=h8a814eb_1
- harfbuzz=4.2.0=h40b6f09_0
- hdf5=1.12.1=nompi_h2386368_104
- htdp=1.0_isis7=1
- icu=69.1=h9c3ff4c_0
- idna=3.4=pyhd8ed1ab_0
- ilmbase=2.5.5=h780b84a_0
- inja=3.3.0=h9c3ff4c_0
- isis=8.0.0=np125_0
- jama=125=0
- jasper=2.0.33=h0ff4b12_1
- jemalloc=5.3.0=hcb278e6_0
- jpeg=9e=h0b41bf4_3
- kakadu=1=0
- kernel-headers_linux-64=3.10.0=h4a8ded7_13
- keyutils=1.6.1=h166bdaf_0
- krb5=1.20.1=hf9c8cef_0
- lame=3.100=h166bdaf_1003
- ld_impl_linux-64=2.40=h41732ed_0
- lerc=4.0.0=h27087fc_0
- libarchive=3.5.2=hb890918_3
- libblas=3.9.0=17_linux64_openblas
- libcblas=3.9.0=17_linux64_openblas
- libclang=13.0.1=default_h7634d5b_3
- libcurl=7.87.0=h6312ad2_0
- libcxx=16.0.6=h00ab1b0_0
- libcxxabi=16.0.6=ha770c72_0
- libdeflate=1.14=h166bdaf_0
- libdrm=2.4.114=h166bdaf_0
- libdrm-cos6-x86_64=2.4.65=4
- libedit=3.1.20191231=he28a2e2_2
- libelas=isis7=h3fd9d12_0
- libev=4.33=h516909a_1
- libevent=2.1.10=h9b69904_4
- libexpat=2.5.0=hcb278e6_1
- libffi=3.4.2=h7f98852_5
- libgcc-devel_linux-64=12.3.0=h8bca6fd_0
- libgcc-ng=13.2.0=h807b86a_2
- libgdal=3.5_isis8=h3fd9d12_0
- libgfortran-ng=13.1.0=h69a702a_0
- libgfortran5=13.1.0=h15d22d2_0
- libglib=2.76.4=hebfc3b9_0
- libglu=9.0.0=he1b5a44_1001
- libglvnd-cos7-x86_64=1.0.1=h9b0a68f_1105
- libgomp=13.2.0=h807b86a_2
- libhwloc=2.8.0=h32351e8_1
- libiconv=1.17=h166bdaf_0
- libidn2=2.3.4=h166bdaf_0
- libjemalloc=5.3.0=hcb278e6_0
- liblapack=3.9.0=17_linux64_openblas
- liblapacke=3.9.0=17_linux64_openblas
- libllvm13=13.0.1=hf817b99_2
- libmicrohttpd=0.9.77=h97afed2_0
- libnabo=isis7=h3fd9d12_0
- libnghttp2=1.51.0=hdcd2b5c_0
- libnsl=2.0.0=h7f98852_0
- libntlm=1.4=h7f98852_1002
- libogg=1.3.4=h7f98852_1
- libopenblas=0.3.23=pthreads_h80387f5_0
- libopencv=4.5.5=py39hb0e02d1_7
- libopus=1.3.1=h7f98852_1
- libpciaccess=0.17=h166bdaf_0
- libpng=1.6.39=h753d276_0
- libpointmatcher=isis7=h2bc3f7f_0
- libpq=14.5=h2baec63_5
- libprotobuf=3.19.6=h3eb15da_0
- libsanitizer=12.3.0=h0f45ef3_0
- libsqlite=3.43.0=h2797004_0
- libssh2=1.10.0=haa6b8db_3
- libstdcxx-devel_linux-64=12.3.0=h8bca6fd_0
- libstdcxx-ng=13.2.0=h7e041cc_2
- libtasn1=4.19.0=h166bdaf_0
- libtiff=4.4.0=h82bc61c_5
- libunistring=0.9.10=h7f98852_0
- libuuid=2.38.1=h0b41bf4_0
- libuv=1.46.0=hd590300_0
- libva=2.18.0=h0b41bf4_0
- libvorbis=1.3.7=h9c3ff4c_0
- libvpx=1.11.0=h9c3ff4c_3
- libwebp-base=1.3.1=hd590300_0
- libx11-common-cos6-x86_64=1.6.4=4
- libx11-cos6-x86_64=1.6.4=4
- libxcb=1.13=h7f98852_1004
- libxkbcommon=1.0.3=he3ba5ed_0
- libxml2=2.9.14=haae042b_4
- libzlib=1.2.13=hd590300_5
- llvm-openmp=16.0.6=h4dfa4b3_0
- lz4-c=1.9.3=h9c3ff4c_1
- lzo=2.10=h516909a_1000
- mesa-libgl-cos6-x86_64=11.0.7=4
- mesalib=23.0.0=h0fe20ba_0
- metis=5.1.0=h59595ed_1007
- mpfr=4.2.0=hb012696_0
- multiview=isis8=py39h3fd9d12_0
- mysql=8.0.28=h3e2b116_2
- mysql-client=8.0.28=hf89ab62_2
- mysql-common=8.0.28=haf5c9bc_2
- mysql-connector-c=6.1.11=h6eb9d5d_1007
- mysql-devel=8.0.28=haf5c9bc_2
- mysql-libs=8.0.28=h28c427c_2
- mysql-server=8.0.28=hb253900_2
- nanoflann=1.4.2=ha770c72_0
- ncurses=6.4=hcb278e6_0
- nettle=3.8.1=hc379101_1
- networkx=3.1=pyhd8ed1ab_0
- nlohmann_json=3.11.2=h27087fc_0
- nn=1.86.0=hd590300_2003
- nspr=4.35=h27087fc_0
- nss=3.92=h1d7d5a4_0
- numpy=1.25.2=py39h6183b62_0
- openblas=0.3.23=pthreads_h855a84d_0
- opencv=4.5.5=py39hf3d152e_7
- openexr=2.5.5=hf817b99_0
- openh264=2.3.1=hcb278e6_2
- openjpeg=2.3.0=hf38bd82_1003
- openssl=1.1.1v=hd590300_0
- p11-kit=0.24.1=hc5aa10d_0
- packaging=23.1=pyhd8ed1ab_0
- parallel=20230722=ha770c72_0
- pbzip2=1.1.13=0
- pcl=1.11.1=h05311af_1
- pcre2=10.40=hc3806b6_0
- pdal=2.4.2_asp3.3.0=py39h3fd9d12_0
- perl=5.32.1=4_hd590300_perl5
- pip=23.2.1=pyhd8ed1ab_0
- pixman=0.40.0=h36c2ea0_0
- platformdirs=3.10.0=pyhd8ed1ab_0
- pooch=1.7.0=pyha770c72_3
- proj=9.1.0=h93bde94_0
- protobuf=3.19.6=py39h227be39_0
- pthread-stubs=0.4=h36c2ea0_1001
- pvl=1.3.2=pyhd8ed1ab_0
- py-opencv=4.5.5=py39hef51801_7
- pysocks=1.7.1=pyha2e5f31_6
- python=3.9.15=h47a2c10_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.9=3_cp39
- pytz=2023.3.post1=pyhd8ed1ab_0
- pyyaml=6.0.1=py39hd1e30aa_0
- qhull=2020.2=h4bd325d_2
- qt=5.12.9=h1304e3e_6
- qwt=6.2.0=hb19a904_2
- rapidjson=1.1.0=he1b5a44_1002
- rclone=1.63.1=h519d9b9_0
- readline=8.2=h8228510_1
- requests=2.31.0=pyhd8ed1ab_0
- rhash=1.4.3=hd590300_2
- rocksdb=6.13.3=hda8cf21_2
- s2p-subset=isis7=h3fd9d12_0
- scipy=1.11.2=py39h6183b62_0
- setuptools=68.1.2=pyhd8ed1ab_0
- six=1.16.0=pyh6c4a22f_0
- snappy=1.1.10=h9fff704_0
- spiceypy=6.0.0=pyhd8ed1ab_0
- sqlite=3.43.0=h2c6b66d_0
- suitesparse=5.10.1=h9e50725_1
- superlu=5.2.2=h00795ac_0
- svt-av1=1.3.0=h27087fc_0
- sysroot_linux-64=2.17=h4a8ded7_13
- tbb=2021.7.0=h924138e_1
- tbb-devel=2021.7.0=h924138e_1
- tiledb=2.9.5=h1e4a385_0
- tk=8.6.12=h27826a3_0
- tnt=126=0
- typing-extensions=4.7.1=hd8ed1ab_0
- typing_extensions=4.7.1=pyha770c72_0
- tzdata=2023c=h71feb2d_0
- urllib3=2.0.4=pyhd8ed1ab_0
- usgscsm=1.7.0=h00ab1b0_1
- wheel=0.41.2=pyhd8ed1ab_0
- x264=1!164.3095=h166bdaf_2
- x265=3.5=h924138e_3
- xerces-c=3.2.3=h8ce2273_4
- xorg-damageproto=1.2.1=h7f98852_1002
- xorg-fixesproto=5.0=h7f98852_1002
- xorg-glproto=1.4.17=h7f98852_1002
- xorg-inputproto=2.3.2=h7f98852_1002
- xorg-kbproto=1.0.7=h7f98852_1002
- xorg-libice=1.1.1=hd590300_0
- xorg-libsm=1.2.4=h7391055_0
- xorg-libx11=1.8.4=h0b41bf4_0
- xorg-libxau=1.0.11=hd590300_0
- xorg-libxdamage=1.1.5=h7f98852_1
- xorg-libxdmcp=1.1.3=h7f98852_0
- xorg-libxext=1.3.4=h0b41bf4_2
- xorg-libxfixes=5.0.3=h7f98852_1004
- xorg-libxi=1.7.10=h7f98852_0
- xorg-libxrandr=1.5.2=h7f98852_1
- xorg-libxrender=0.9.10=h7f98852_1003
- xorg-randrproto=1.5.0=h7f98852_1001
- xorg-renderproto=0.11.1=h7f98852_1002
- xorg-util-macros=1.19.3=h7f98852_0
- xorg-xextproto=7.3.0=h0b41bf4_1003
- xorg-xf86vidmodeproto=2.3.1=h7f98852_1002
- xorg-xproto=7.0.31=h7f98852_1007
- xz=5.2.6=h166bdaf_0
- yaml=0.2.5=h7f98852_2
- zlib=1.2.13=hd590300_5
- zstd=1.5.5=hfc55251_0
================================================
FILE: conda/asp_deps_3.4.0_alpha_osx_env.yaml
================================================
name: asp_deps_3.4.0_alpha
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
dependencies:
- ale=0.9.1=py39h8ee36c8_0
- aom=3.5.0=hf0c8a7f_0
- armadillo=12.6.1=hdc495e4_0
- arpack=3.7.0=hefb7bc6_2
- blas=2.117=openblas
- blas-devel=3.9.0=17_osx64_openblas
- boost=1.72.0=py39hb64e6f8_1
- boost-cpp=1.72.0=hf3dc895_6
- brotli-python=1.1.0=py39h840bb9f_0
- bullet=2.86.1=0
- bz2file=0.98=py_0
- bzip2=1.0.8=h0d85af4_4
- c-ares=1.19.1=h0dc2134_0
- c-compiler=1.5.2=hbf74d83_0
- ca-certificates=2023.7.22=h8857fd0_0
- cairo=1.16.0=h9e0e54b_1010
- cctools=973.0.1=h76f1dac_13
- cctools_osx-64=973.0.1=hcc6d90d_13
- ceres-solver=1.14.0=h636452b_15
- certifi=2023.7.22=pyhd8ed1ab_0
- charset-normalizer=3.2.0=pyhd8ed1ab_0
- clang=14.0.6=h694c41f_1
- clang-14=14.0.6=default_hdb78580_1
- clang_osx-64=14.0.6=h3113cd8_6
- clangxx=14.0.6=default_hdb78580_1
- clangxx_osx-64=14.0.6=h6f97653_6
- cmake=3.27.4=hf40c264_4
- compiler-rt=14.0.6=h613da45_0
- compiler-rt_osx-64=14.0.6=hab78ec2_0
- compilers=1.5.2=h694c41f_0
- csm=3.0.3.3=0
- cspice=67=hb7f2c08_4
- curl=8.2.1=h5f667d7_0
- cxx-compiler=1.5.2=hb8565cd_0
- cyrus-sasl=2.1.27=hf9bab2b_7
- eigen=3.4.0=h1c7c39f_0
- embree=2.17.7=h694c41f_3
- expat=2.5.0=hf0c8a7f_1
- ffmpeg=4.4.2=gpl_hff0bab5_109
- fftw=3.3.10=nompi_h4fa670e_108
- fgr=isis7=h01edc0c_0
- flann=1.9.1=h30321d8_1010
- font-ttf-dejavu-sans-mono=2.37=hab24e00_0
- font-ttf-inconsolata=3.000=h77eed37_0
- font-ttf-source-code-pro=2.038=h77eed37_0
- font-ttf-ubuntu=0.83=hab24e00_0
- fontconfig=2.14.2=h5bb23bf_0
- fonts-conda-ecosystem=1=0
- fonts-conda-forge=1=0
- fortran-compiler=1.5.2=haad3a49_0
- freetype=2.12.1=h3f81eb7_1
- geoid=1.0_isis7=1
- geos=3.9.1=he49afe7_2
- geotiff=1.7.1=he29fd1c_4
- gettext=0.21.1=h8a4c099_0
- gflags=2.2.2=hb1e8313_1004
- gfortran=11.4.0=h2c809b3_1
- gfortran_impl_osx-64=11.4.0=h2a33dde_1
- gfortran_osx-64=11.4.0=h18f7dce_1
- giflib=5.2.1=hb7f2c08_3
- glog=0.6.0=h8ac2a54_0
- gmp=6.2.1=h2e338ed_0
- gnutls=3.7.8=h207c4f0_0
- graphite2=1.3.13=h2e338ed_1001
- gsl=2.6=h71c5fe9_2
- harfbuzz=4.2.0=h48644e2_0
- hdf5=1.12.1=nompi_h0aa1fa2_104
- htdp=1.0_isis7=1
- icu=69.1=he49afe7_0
- idna=3.4=pyhd8ed1ab_0
- ilmbase=2.5.5=hfab91a5_0
- inja=3.3.0=he49afe7_0
- isis=8.0.0=np125_0
- isl=0.25=hb486fe8_0
- jama=125=0
- jasper=2.0.33=h7c6fec8_1
- jemalloc=5.3.0=hf0c8a7f_0
- jpeg=9e=hb7f2c08_3
- kakadu=1=0
- krb5=1.21.1=hb884880_0
- lame=3.100=hb7f2c08_1003
- ld64=609=hc6ad406_13
- ld64_osx-64=609=hfd63004_13
- lerc=4.0.0=hb486fe8_0
- libblas=3.9.0=17_osx64_openblas
- libcblas=3.9.0=17_osx64_openblas
- libclang=13.0.1=root_62804_h2961583_3
- libclang-cpp14=14.0.6=default_hdb78580_1
- libcurl=8.2.1=h5f667d7_0
- libcxx=16.0.6=hd57cbcb_0
- libdeflate=1.14=hb7f2c08_0
- libedit=3.1.20191231=h0678c8f_2
- libelas=isis7=h01edc0c_0
- libev=4.33=haf1e3a3_1
- libevent=2.1.10=h7d65743_4
- libexpat=2.5.0=hf0c8a7f_1
- libffi=3.4.2=h0d85af4_5
- libgdal=3.5_isis8=hf8dc8b4_0
- libgfortran=5.0.0=12_3_0_h97931a8_1
- libgfortran-devel_osx-64=11.4.0=h01aa347_1
- libgfortran5=12.3.0=hbd3c1fe_1
- libglib=2.76.4=hc62aa5d_0
- libiconv=1.17=hac89ed1_0
- libidn2=2.3.4=hb7f2c08_0
- libjemalloc=5.3.0=hf0c8a7f_0
- liblapack=3.9.0=17_osx64_openblas
- liblapacke=3.9.0=17_osx64_openblas
- libllvm13=13.0.1=h64f94b2_2
- libllvm14=14.0.6=hc8e404f_4
- libnabo=isis7=h01edc0c_0
- libnghttp2=1.52.0=he2ab024_0
- libntlm=1.4=h0d85af4_1002
- libopenblas=0.3.23=openmp_h429af6e_0
- libopencv=4.5.5=py39hc2bf5a6_7
- libpng=1.6.39=ha978bb4_0
- libpointmatcher=isis7=ha5a8b8e_0
- libpq=14.5=h3df487d_7
- libprotobuf=3.19.6=hbc0c0cd_0
- libsqlite=3.42.0=h58db7d2_0
- libssh2=1.11.0=hd019ec5_0
- libtasn1=4.19.0=hb7f2c08_0
- libtiff=4.4.0=h6268bbc_5
- libunistring=0.9.10=h0d85af4_0
- libuv=1.46.0=h0c2f820_0
- libvpx=1.11.0=he49afe7_3
- libwebp-base=1.3.1=h0dc2134_0
- libxcb=1.15=hb7f2c08_0
- libxml2=2.9.14=h1faee8b_4
- libzlib=1.2.13=h8a1eda9_5
- llvm-openmp=16.0.6=hff08bdf_0
- llvm-tools=14.0.6=hc8e404f_4
- lz4-c=1.9.3=he49afe7_1
- macports-legacy-support=1.0.13=h0dc2134_0
- mesalib=23.1.4=hb59017c_0
- metis=5.1.0=he965462_1007
- mpc=1.3.1=h81bd1dd_0
- mpfr=4.2.0=h4f9bd69_0
- multiview=isis8=py39hf8dc8b4_0
- mysql=8.0.28=h88f4db0_2
- mysql-client=8.0.28=h7ddd48c_2
- mysql-common=8.0.28=hdd8d184_2
- mysql-devel=8.0.28=hdd8d184_2
- mysql-libs=8.0.28=h353f102_2
- mysql-server=8.0.28=h6edde1b_2
- nanoflann=1.4.2=h694c41f_0
- ncurses=6.4=hf0c8a7f_0
- nettle=3.8.1=h96f3785_1
- networkx=3.1=pyhd8ed1ab_0
- nlohmann_json=3.11.2=hbbd2c75_0
- nn=1.86.0=h0dc2134_2003
- nspr=4.35=hea0b92c_0
- nss=3.92=hd6ac835_0
- numpy=1.25.2=py39h892e69a_0
- openblas=0.3.23=openmp_hbefa662_0
- opencv=4.5.5=py39h6e9494a_7
- openexr=2.5.5=h7fa7ffa_0
- openh264=2.3.1=hf0c8a7f_2
- openjpeg=2.3.0=h3bf0609_1003
- openssl=3.1.2=h8a1eda9_0
- p11-kit=0.24.1=h65f8906_0
- packaging=23.1=pyhd8ed1ab_0
- parallel=20230722=h694c41f_0
- pbzip2=1.1.13=h9d27c22_1
- pcl=1.11.1=h7984e4d_1
- pcre2=10.40=h1c4e4bc_0
- pdal=2.4.2_asp3.3.0=py39hf8dc8b4_0
- perl=5.32.1=4_h0dc2134_perl5
- pip=23.2.1=pyhd8ed1ab_0
- pixman=0.40.0=hbcb3906_0
- platformdirs=3.10.0=pyhd8ed1ab_0
- pooch=1.7.0=pyha770c72_3
- proj=9.1.0=hcbd9701_0
- protobuf=3.19.6=py39h7a8716b_0
- pthread-stubs=0.4=hc929b4f_1001
- pvl=1.3.2=pyhd8ed1ab_0
- py-opencv=4.5.5=py39h71a6800_7
- pysocks=1.7.1=pyha2e5f31_6
- python=3.9.17=h07e1443_0_cpython
- python-dateutil=2.8.2=pyhd8ed1ab_0
- python_abi=3.9=3_cp39
- pytz=2023.3.post1=pyhd8ed1ab_0
- pyyaml=6.0.1=py39hdc70f33_0
- qhull=2020.2=h940c156_2
- qt=5.12.9=h2a607e2_5
- qwt=6.2.0=h0f3c874_2
- rapidjson=1.1.0=hb1e8313_1002
- rclone=1.63.1=h1762f63_0
- readline=8.2=h9e318b2_1
- requests=2.31.0=pyhd8ed1ab_0
- rhash=1.4.4=h0dc2134_0
- rocksdb=6.13.3=hbb73eaa_2
- s2p-subset=isis7=h01edc0c_0
- scipy=1.11.2=py39hded996c_0
- setuptools=68.1.2=pyhd8ed1ab_0
- sigtool=0.1.3=h88f4db0_0
- six=1.16.0=pyh6c4a22f_0
- snappy=1.1.10=h225ccf5_0
- spiceypy=6.0.0=pyhd8ed1ab_0
- sqlite=3.42.0=h2b0dec6_0
- suitesparse=5.10.1=h7aff33d_1
- superlu=5.2.2=h1f0f902_0
- svt-av1=1.3.0=hf0c8a7f_0
- tapi=1100.0.11=h9ce4665_0
- tbb=2021.10.0.custom_asp=h6b95b14_0
- tbb-devel=2021.10.0.custom_asp=h6b95b14_0
- tiledb=2.9.5=h86bd37b_0
- tk=8.6.12=h5dbffcc_0
- tnt=126=0
- typing-extensions=4.7.1=hd8ed1ab_0
- typing_extensions=4.7.1=pyha770c72_0
- tzdata=2023c=h71feb2d_0
- urllib3=2.0.4=pyhd8ed1ab_0
- usgscsm=1.7.0=h1c7c39f_1
- wheel=0.41.2=pyhd8ed1ab_0
- x264=1!164.3095=h775f41a_2
- x265=3.5=hbb4e6a2_3
- xerces-c=3.2.3=h6564042_4
- xorg-damageproto=1.2.1=h0d85af4_1002
- xorg-fixesproto=5.0=h0d85af4_1002
- xorg-glproto=1.4.17=h0d85af4_1002
- xorg-inputproto=2.3.2=h35c211d_1002
- xorg-kbproto=1.0.7=h35c211d_1002
- xorg-libice=1.1.1=h0dc2134_0
- xorg-libsm=1.2.4=h0dc2134_0
- xorg-libx11=1.8.6=hbd0b022_0
- xorg-libxau=1.0.11=h0dc2134_0
- xorg-libxdamage=1.1.5=h0d85af4_1
- xorg-libxdmcp=1.1.3=h35c211d_0
- xorg-libxext=1.3.4=hb7f2c08_2
- xorg-libxfixes=5.0.3=h0d85af4_1004
- xorg-libxi=1.7.10=h0d85af4_0
- xorg-libxrandr=1.5.2=h0d85af4_1
- xorg-libxrender=0.9.11=h0dc2134_0
- xorg-randrproto=1.5.0=h0d85af4_1001
- xorg-renderproto=0.11.1=h0d85af4_1002
- xorg-util-macros=1.19.3=h35c211d_0
- xorg-xextproto=7.3.0=hb7f2c08_1003
- xorg-xf86vidmodeproto=2.3.1=h0d85af4_1002
- xorg-xproto=7.0.31=h35c211d_1007
- xz=5.2.6=h775f41a_0
- yaml=0.2.5=h0d85af4_2
- zlib=1.2.13=h8a1eda9_5
- zstd=1.5.5=h829000d_0
================================================
FILE: conda/environment.yml
================================================
# Placeholder minimal asp deps conda environment. Not yet fully fleshed out.
# Use instead the ones at: conda/asp_deps_3.4.0*.yaml
name: asp_deps
channels:
- nasa-ames-stereo-pipeline
- usgs-astrogeology
- conda-forge
dependencies:
- pdal 2.6.0
- opencv 4.8.1
- cmake>=3.15
- csm
- eigen
- nlohmann_json
- proj
- sqlite>=3.11
================================================
FILE: conda/update_versions.py
================================================
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
#  Copyright (c) 2009-2013, United States Government as represented by the
#  Administrator of the National Aeronautics and Space Administration. All
#  rights reserved.
#
#  The NGT platform is licensed under the Apache License, Version 2.0 (the
#  "License"); you may not use this file except in compliance with the
#  License. You may obtain a copy of the License at
#  http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
# __END_LICENSE__

'''
Use dependency versions from a conda environment .yaml file to update
a recipe/meta.yaml file of a given package. Such an input file can
be created from the given environment with:

    conda env export > myEnv.yaml
'''

import sys, os, re

def find_recipe_file(outDir):
    '''Return the path of the recipe file inside outDir, trying
    recipe/meta.yaml first and recipe/recipe.yaml (used by recent
    versions of conda-build) second. Exit with status 1 if neither
    exists.'''
    outFile = outDir + "/recipe/meta.yaml"
    if os.path.exists(outFile):
        return outFile
    print("Cannot open file: " + outFile)
    # Recent versions of conda-build use recipe/recipe.yaml
    outFile = outDir + "/recipe/recipe.yaml"
    if not os.path.exists(outFile):
        print("Cannot open file: " + outFile)
        sys.exit(1)
    print("Found file: " + outFile)
    return outFile

def parse_conda_env(inFile):
    '''Parse the versions from a conda environment .yaml file.
    Lines look like "  - package=version=build"; return a dict
    mapping package name to version string.'''
    conda_env = {}
    print("Reading: " + inFile)
    # Use a context manager so the handle is closed even on error
    with open(inFile, 'r') as inHandle:
        lines = inHandle.readlines()
    for line in lines:
        # Wipe comments
        m = re.match(r'^(.*?)\#', line)
        if m:
            line = m.group(1)
        # Match the package; the version is between the first and
        # second '=' (the build string, if any, is ignored)
        m = re.match(r'^\s*-\s*(.*?)\s*=+\s*(.*?)(=|\s|$)', line)
        if not m:
            continue
        package = m.group(1)
        version = m.group(2)
        if re.match(r'^\s*$', package):
            continue # ignore empty lines
        conda_env[package] = version
        #print("got ", package, version)
    return conda_env

def update_meta_lines(lines, conda_env):
    '''Given the lines of a recipe file, replace the versions of the
    packages in the "requirements:" section with the versions found in
    conda_env (a dict from package name to version). Returns the
    updated list of lines. Packages absent from conda_env, versionless
    entries, and platform-specific entries ([linux]/[osx]) are left
    alone.'''
    inDepSection = False
    for it in range(len(lines)):
        line = lines[it]
        if 'requirements:' in line:
            # We are at the beginning of the dependencies
            inDepSection = True
            continue
        if not inDepSection:
            # We are not in the dependencies section
            continue
        if 'test:' in line or 'tests:' in line or 'about:' in line:
            # We are at the end of the dependencies
            break
        # Ignore comments
        m = re.match(r'^\#', line)
        if m:
            continue
        # Match the package
        m = re.match(r'^(\s+-\s*)(.*?)([\s=]+)(.*?)$', line)
        if not m:
            continue
        pre = m.group(1)
        package = m.group(2)
        separator = m.group(3).rstrip("\n")
        old_version = m.group(4).rstrip("\n")
        if separator == "":
            # Ensure there's at least one space
            separator = " "
        if old_version == "":
            # If there was no version before, print a warning, as sometimes the
            # version chosen later won't be the expected one.
            print("Warning: For package " + package + ", no version was specified, "
                  "this may lead to unexpected results.")
            continue
        if package not in conda_env:
            continue
        version = conda_env[package]
        if old_version != version:
            if ('[linux]' in old_version) or ('[osx]' in old_version):
                # In this case the user better take a closer look
                print("For package " + package + ", not replacing " +
                      old_version + " with " + version + ", a closer look is suggested.")
            else:
                print("For package " + package + ", replacing version "
                      + old_version + " with " + version)
                lines[it] = pre + package + separator + version + "\n"
    return lines

def main():
    '''Parse the command line, read the conda environment, and update
    the recipe file of the given feedstock in place.'''
    if len(sys.argv) < 3:
        print("Usage: " + os.path.basename(sys.argv[0]) + " input.yaml myPackage-feedstock")
        sys.exit(1)

    inFile = sys.argv[1]
    outDir = sys.argv[2]
    outFile = find_recipe_file(outDir)

    # Parse the versions from the conda env
    conda_env = parse_conda_env(inFile)

    # Update the lines in the output file
    with open(outFile, 'r') as outHandle:
        lines = outHandle.readlines()
    lines = update_meta_lines(lines, conda_env)

    # Save the updated lines to disk
    print("Updating: " + outFile)
    with open(outFile, "w") as outHandle:
        outHandle.writelines(lines)

if __name__ == '__main__':
    main()
================================================
FILE: docs/Makefile
================================================
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS   =
SPHINXBUILD  = sphinx-build
SPHINXPROJ   = AmesStereoPipeline
# Directory holding conf.py and the .rst sources
SOURCEDIR    = .
# All build output (html, latex, etc.) goes under this directory
BUILDDIR     = _build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
================================================
FILE: docs/acknowledgements.rst
================================================
Credits
=======
.. include:: ../AUTHORS.rst
================================================
FILE: docs/bathy_water_masking.rst
================================================
.. _bathy_water_masking:
Bathy water masking
===================
For shallow water bathymetry (:numref:`bathy_intro`) it is important to
distinguish land pixels from water pixels. This allows the algorithm to focus on
underwater terrain while avoiding false depth estimates from land features.
A simple and commonly used approach is to threshold the near-infrared (NIR)
band 7, where water typically appears darker than land. This method is
described in :numref:`bathy_thresh`.
In complex coastal environments with vegetation, shadows, turbid water, or
shallow clear water, the NIR band alone may not provide sufficient separation.
This section describes alternative spectral indices that combine multiple bands
that may improve land-water discrimination.
Multispectral image bands
-------------------------
WorldView satellites capture multispectral imagery with eight bands.
.. list-table:: WorldView-3 multispectral bands
:widths: 10 30
:header-rows: 1
* - Band
- Name
* - 1
- Coastal
* - 2
- Blue
* - 3
- Green
* - 4
- Yellow
* - 5
- Red
* - 6
- Red Edge
* - 7
- NIR1 (Near-infrared 1)
* - 8
- NIR2 (Near-infrared 2)
Other vendors provide similar products.
Individual bands can be extracted from a multispectral image with
``gdal_translate`` (:numref:`gdal_tools`). For example, run this for the green
band (band 3)::
b=3
gdal_translate -b ${b} -co compress=lzw -co TILED=yes \
-co BLOCKXSIZE=256 -co BLOCKYSIZE=256 \
input.TIF input_b${b}.tif
The compression and tiling options help with the performance of ASP processing
later.
Water indices for land-water masking
------------------------------------
The following indices provide alternatives to band 7 (NIR1), as described
in :numref:`bathy_thresh`.
NDWI (Normalized Difference Water Index)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NDWI is computed as:
.. math::
\text{NDWI} = \frac{\text{Green} - \text{NIR}}{\text{Green} + \text{NIR}}
This index enhances the contrast between water and land. Water typically has
positive values, while land and vegetation have negative values. It is effective
for general water delineation and separates water from soil and terrestrial
vegetation well.
To compute NDWI using ``image_calc`` (:numref:`image_calc`), do::
image_calc -c "(var_0 - var_1) / (var_0 + var_1)" \
input_b3.tif input_b7.tif -o ndwi.tif
where ``input_b3.tif`` is the green band and ``input_b7.tif`` is the NIR band.
RNDVI (Reversed Normalized Difference Vegetation Index)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
RNDVI is computed as:
.. math::
\text{RNDVI} = \frac{\text{Red} - \text{NIR}}{\text{Red} + \text{NIR}}
This is the inverse of the standard NDVI (which is high for vegetation). In
RNDVI, water appears bright (high values) while vegetation appears very dark
(low or negative values). This index is particularly effective in areas with
heavy vegetation along the shoreline, such as mangroves or dense forests, where
standard NIR masking may be ambiguous.
To compute RNDVI using ``image_calc`` run::
image_calc -c "(var_0 - var_1) / (var_0 + var_1)" \
input_b5.tif input_b7.tif -o rndvi.tif
where ``input_b5.tif`` is the red band and ``input_b7.tif`` is the NIR band.
OSI (Ocean/Sea Index)
~~~~~~~~~~~~~~~~~~~~~
OSI is computed as:
.. math::
\text{OSI} = \frac{\text{Green} + \text{Red}}{\text{Blue}}
This index uses the ratio of longer visible wavelengths to blue. It can be
useful for specific water conditions, though it is often less robust in clear
shallow water than NDWI or RNDVI.
The ``image_calc`` command is::
image_calc -c "(var_0 + var_1) / var_2" \
input_b3.tif input_b5.tif input_b2.tif -o osi.tif
where ``input_b3.tif`` is the green band, ``input_b5.tif`` is the red band,
and ``input_b2.tif`` is the blue band.
Thresholding
------------
The resulting index images (``ndwi.tif``, ``rndvi.tif``, ``osi.tif``) can be
converted to binary water masks. This requires computing an appropriate
threshold. Here's an example that invokes ``otsu_threshold``
(:numref:`otsu_threshold`) for that purpose, with ``ndwi.tif``:
::
otsu_threshold ndwi.tif
This will print the computed threshold to standard output. This value should
then be used in the masking command below.
Mask creation
-------------
When creating binary masks from these indices it is important to note the
following.
**Polarity reversal:** Unlike the raw NIR band (band 7) in
:numref:`bathy_thresh`, where water is darker than land, the spectral indices
NDWI and RNDVI make water appear brighter. This affects how binary masks are
created from thresholds.
The mask convention is that **land pixels have value 1** (or positive values)
and **water pixels have value 0** (or nodata).
For the NIR band, water pixels are *at or below* the threshold and land pixels are
*strictly above*, so the masking command is::
threshold=225
image_calc -c "gt(var_0, $threshold, 1, 0)" \
input_b7.tif -o land_mask.tif
For NDWI and RNDVI, water pixels are *at or above* the threshold and land pixels
are *strictly below*::
threshold=0.38
image_calc -c "lt(var_0, $threshold, 1, 0)" \
ndwi.tif -o land_mask.tif
Note the reversed comparison operator (``gt`` vs ``lt``) to maintain the
convention that land=1 and water=0.
**Site-specific performance:** The effectiveness of these indices varies
depending on local water conditions, bottom composition, turbidity, and
shoreline vegetation.
In testing, NDWI and NIR band 7 showed the most consistent thresholds across
images. RNDVI might be effective for vegetated shorelines but could require
site-specific threshold adjustment. The results with OSI were not as promising
in our experiments (for Key West, FL).
It is recommended to compute all indices and visually inspect the produced masks in
``stereo_gui`` (:numref:`stereo_gui`) before selecting the most appropriate one
for your specific site.
================================================
FILE: docs/bibliography.bib
================================================
%% This BibTeX bibliography file was created using BibDesk.
%% http://bibdesk.sourceforge.net/
%% Created for Ross Beyer at 2009-03-18 17:10:33 -0700
%% Saved with string encoding Unicode (UTF-8)
@article{2001JGR.10623429M,
Adsnote = {Provided by the NASA Astrophysics Data System},
Adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2001JGR...10623429M&db_key=AST},
Author = {{Malin}, M.~C. and {Edgett}, K.~S.},
Date-Added = {2009-03-18 17:07:55 -0700},
Date-Modified = {2009-03-18 17:07:55 -0700},
Journal = {Journal of Geophysical Research},
Keywords = {spacecraft; MOC;},
Month = oct,
Number = {E10},
Pages = {23429-23570},
Title = {{{Mars Global Surveyor} {Mars Orbiter Camera}: Interplanetary cruise through primary mission}},
Volume = 106,
Year = 2001,
Bdsk-Url-1 = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2001JGR...10623429M&db_key=AST}}
@article{1992JGR.97.7699M,
Adsnote = {Provided by the NASA Astrophysics Data System},
Adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1992JGR....97.7699M&db_key=AST},
Author = {{Malin}, M.~C. and {Danielson}, G.~E. and {Ingersoll}, A.~P. and {Masursky}, H. and {Veverka}, J. and {Ravine}, M.~A. and {Soulanille}, T.~A.},
Date-Added = {2009-03-18 17:07:19 -0700},
Date-Modified = {2009-03-18 17:07:19 -0700},
Hardcopy = {Yes},
Journal = {Journal of Geophysical Research},
Keywords = {spacecraft; MOC;},
Month = may,
Number = {E5},
Pages = {7699-7718},
Title = {{Mars Observer Camera}},
Volume = 97,
Year = 1992,
Bdsk-Url-1 = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1992JGR....97.7699M&db_key=AST}}
@article{2007SSRv..129..391C,
Adsnote = {Provided by the SAO/NASA Astrophysics Data System},
Adsurl = {http://adsabs.harvard.edu/abs/2007SSRv..129..391C},
Author = {{Chin}, G. and {Brylow}, S. and {Foote}, M. and {Garvin}, J. and {Kasper}, J. and {Keller}, J. and {Litvak}, M. and {Mitrofanov}, I. and {Paige}, D. and {Raney}, K. and {Robinson}, M. and {Sanin}, A. and {Smith}, D. and {Spence}, H. and {Spudis}, P. and {Stern}, S.~A. and {Zuber}, M.},
Date-Added = {2009-03-13 21:46:40 -0700},
Date-Modified = {2009-03-13 21:46:40 -0700},
Journal = {Space Science Reviews},
Keywords = {Moon, Lunar, Vision for Space Exploration, NASA, Spacecraft, Space instrumentation, Remote observation},
Month = apr,
Pages = {391-419},
Title = {{Lunar Reconnaissance Orbiter Overview: The Instrument Suite and Mission}},
Volume = 129,
Year = 2007,
Bdsk-Url-1 = {http://dx.doi.org/10.1007/s11214-007-9153-y},
Bdsk-Url-2 = {http://adsabs.harvard.edu/abs/2007SSRv..129..391C}}
@article{2008AGUFM.P31B1419N,
Adsnote = {Provided by the SAO/NASA Astrophysics Data System},
Adsurl = {http://adsabs.harvard.edu/abs/2008AGUFM.P31B1419N},
Author = {{Neumann}, G.~A. and {Lemoine}, F.~G. and {Mazarico}, E. and {McGarry}, J.~F. and {Rowlands}, D.~D. and {Smith}, D.~E. and {Sun}, X. and {Torrence}, M. and {Zagwodski}, T. and {Zellar}, R. and {Zuber}, M.~T.},
Date-Added = {2009-03-13 21:46:07 -0700},
Date-Modified = {2009-03-13 21:46:07 -0700},
Journal = {AGU Fall Meeting Abstracts},
Keywords = {1221 Lunar and planetary geodesy and gravity (5417, 5450, 5714, 5744, 6019, 6250)},
Month = dec,
Pages = {B1419+},
Title = {{Status of Lunar Reconnaissance Orbiter Laser Ranging and Laser Altimeter Experiments}},
Year = 2008,
Bdsk-Url-1 = {http://adsabs.harvard.edu/abs/2008AGUFM.P31B1419N}}
@article{1992JGR....97.7781Z,
Adsnote = {Provided by the NASA Astrophysics Data System},
Adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1992JGR....97.7781Z&db_key=AST},
Author = {{Zuber}, M.~T. and {Smith}, D.~E. and {Solomon}, S.~C. and {Muhleman}, D.~O. and {Head}, J.~W. and {Garvin}, J.~B. and {Abshire}, J.~B. and {Bufton}, J.~L.},
Date-Added = {2009-03-13 21:38:33 -0700},
Date-Modified = {2009-03-13 21:38:33 -0700},
Journal = {Journal of Geophysical Research},
Keywords = {spacecraft; MOLA;},
Month = may,
Number = {E5},
Pages = {7781-7797},
Title = {{The {Mars Observer} laser altimeter investigation}},
Volume = 97,
Year = 1992,
Bdsk-Url-1 = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1992JGR....97.7781Z&db_key=AST}}
@article{2001JGR...10623689S,
Adsnote = {Provided by the NASA Astrophysics Data System},
Adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2001JGR...10623689S&db_key=AST},
Author = {{Smith}, D.~E. and {Zuber}, M.~T. and {Frey}, H.~V. and {Garvin}, J.~B. and {Head}, J.~W. and {Muhleman}, D.~O. and {Pettengill}, G.~H. and {Phillips}, R.~J. and {Solomon}, S.~C. and {Zwally}, H.~J. and {Banerdt}, W.~B. and {Duxbury}, T.~C. and {Golombek}, M.~P. and {Lemoine}, F.~G. and {Neumann}, G.~A. and {et al.}},
Date-Added = {2009-03-13 21:37:55 -0700},
Date-Modified = {2009-03-13 21:37:55 -0700},
Journal = {Journal of Geophysical Research},
Keywords = {spacecraft; MOLA;},
Month = oct,
Number = {E10},
Pages = {23689-23722},
Title = {{{Mars Orbiter Laser Altimeter}: Experiment summary after the first year of global mapping of Mars}},
Volume = 106,
Year = 2001,
Bdsk-Url-1 = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2001JGR...10623689S&db_key=AST}}
@inproceedings{surf08,
Author = {Herbert Bay and Andreas Ess and Tinne Tuytelaars and Luc Van Gool},
Booktitle = {Computer Vision and Image Understanding (CVIU)},
Date-Added = {2009-03-13 17:08:04 -0700},
Date-Modified = {2009-03-13 17:10:41 -0700},
Number = {3},
Pages = {346-359},
Title = {{SURF}: Speeded Up Robust Features},
Url = {http://www.vision.ee.ethz.ch/~surf/},
Volume = {110},
Year = {2008},
Bdsk-Url-1 = {http://www.vision.ee.ethz.ch/~surf/}}
@article{triggs00,
	Abstract = { This paper is a survey of the theory and methods of photogrammetric bundle adjustment, aimed at potential implementors in the computer vision community. Bundle adjustment is the problem of refining a visual reconstruction to produce jointly optimal structure and viewing parameter estimates. Topics covered include the choice of cost function and robustness; numerical optimization including sparse Newton methods, linearly convergent approximations, updating and recursive methods; gauge (datum) invariance; and quality control. The theory is developed for general robust cost functions rather than restricting attention to traditional nonlinear least squares.
Keywords: Bundle Adjustment, Scene Reconstruction, Gauge Freedom, Sparse Matrices, Optimization.},
Author = {Triggs, Bill and Mclauchlan, Philip F. and Hartley, Richard I. and Fitzgibbon, Andrew W. },
Citeulike-Article-Id = {239461},
Date-Added = {2009-03-13 17:02:18 -0700},
Date-Modified = {2009-03-13 17:03:02 -0700},
Journal = {Lecture Notes in Computer Science},
Keywords = {3d, from, motion, reconstruction, structure},
Month = {January},
Pages = {298+},
Posted-At = {2008-06-11 14:50:00},
Priority = {2},
Title = {Bundle Adjustment -- A Modern Synthesis},
Volume = {1883},
Year = {2000},
Bdsk-Url-1 = {http://www.metapress.com/link.asp?id=PLVCRQ5BX753A2TN}}
@article{Pomerleau12comp,
author = {Pomerleau, Fran{\c c}ois and Colas, Francis and Siegwart, Roland and Magnenat, St{\'e}phane},
title = {{Comparing ICP Variants on Real-World Data Sets}},
journal = {Autonomous Robots},
year = {2013},
volume = {34},
number = {3},
pages = {133--148},
month = feb
}
@book{hartley04,
Author = {Hartley, R.~I. and Zisserman, A.},
Date-Added = {2009-03-13 16:53:28 -0700},
Date-Modified = {2009-03-13 16:53:28 -0700},
Edition = {Second},
Publisher = {Cambridge University Press, ISBN: 0521540518},
Title = {Multiple View Geometry in Computer Vision},
Year = {2004}}
@inproceedings{moore09,
Author = {Moore, Zach and Wright, Dan and Lewis, Chris and Schinstock, Dale},
Booktitle = {ASPRS Annual Conf., Baltimore, Maryland},
Date-Added = {2009-03-13 16:40:51 -0700},
Date-Modified = {2009-03-13 17:00:03 -0700},
Title = {Comparison of Bundle Adjustment Formulations},
Year = {2009}}
% MRO
@INPROCEEDINGS{ johnston03,
author = {Johnston, M. D. and Graf, J. E. and Zurek, R. W. and Eisen, H. J. and Jai, B.},
title = {{The Mars Reconnaissance Orbiter Mission}},
booktitle = {2003 IEEE Aerospace Conference},
pages = {447-464},
year = 2003
}
% LRO stuff
@INPROCEEDINGS{2005LPI....36.1576R,
author = {{Robinson}, M.~S. and {Eliason}, E.~M. and {Hiesinger}, H. and
{Jolliff}, B.~L. and {McEwen}, A.~S. and {Malin}, M.~C. and
{Ravine}, M.~A. and {Roberts}, D. and {Thomas}, P.~C. and {Turtle}, E.~P.
},
title = "{LROC -- Lunar Reconnaissance Orbiter Camera}",
booktitle = {Lunar and Planetary Science XXXVI},
year = 2005,
editor = {{Mackwell}, S. and {Stansbery}, E.},
month = mar,
number = {\#1576},
organization = {Lunar and Planetary Institute, Houston (CD-ROM)},
adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2005LPI....36.1576R&db_key=AST},
adsnote = {Provided by the Smithsonian/NASA Astrophysics Data System}
}
@INPROCEEDINGS{2006LPI....37.1949C,
author = {{Chin}, G. and {Bartels}, A. and {Brylow}, S. and {Foote}, M. and
{Garvin}, J. and {Kaspar}, J. and {Keller}, J. and {Mitrofanov}, I. and
{Raney}, K. and {Robinson}, M. and {Smith}, D. and {Spence}, H. and
{Spudis}, P. and {Stern}, S.~A. and {Zuber}, M.},
title = "{Lunar Reconnaissance Orbiter Overview: The Instrument Suite and Mission}",
booktitle = {Lunar and Planetary Science XXXVII},
year = 2006,
editor = {{Mackwell}, S. and {Stansbery}, E.},
month = mar,
pages = {\#1949},
adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2006LPI....37.1949C&db_key=AST},
adsnote = {Provided by the Smithsonian/NASA Astrophysics Data System}
}
% SOCET SET ?
@INPROCEEDINGS{2006LPI....37.2050K,
author = {{Kirk}, R.~L. and {Howington-Kraus}, E. and {Galuszka}, D. and
{Redding}, B. and {Hare}, T.~M. and {Heipke}, C. and {Oberst}, J. and
{Neukum}, G. and {HRSC Co-Investigator Team}},
title = "{Mapping Mars with HRSC, ISIS, and SOCET SET}",
booktitle = {Lunar and Planetary Science XXXVII},
year = 2006,
editor = {{Mackwell}, S. and {Stansbery}, E.},
month = mar,
pages = {\#2050},
adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2006LPI....37.2050K&db_key=AST},
adsnote = {Provided by the Smithsonian/NASA Astrophysics Data System}
}
% ISIS references:
@INPROCEEDINGS{2004LPI.35.2039A,
author = {{Anderson}, J.~A. and {Sides}, S.~C. and {Soltesz}, D.~L. and
{Sucharski}, T.~L. and {Becker}, K.~J.},
title = "{Modernization of the Integrated Software for Imagers and Spectrometers}",
booktitle = {Lunar and Planetary Science XXXV},
year = 2004,
editor = {{Mackwell}, S. and {Stansbery}, E.},
month = mar,
number = {\#2039},
organization = {Lunar and Planetary Institute, Houston (CD-ROM)},
adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2004LPI....35.2039A&db_key=AST},
adsnote = {Provided by the Smithsonian/NASA Astrophysics Data System}
}
@INPROCEEDINGS{1997LPI.28.387G,
author = {{Gaddis}, L. and {Anderson}, J. and {Becker}, K. and
{Becker}, T. and {Cook}, D. and {Edwards}, K. and
{Eliason}, E. and {Hare}, T. and {Kieffer}, H. and
{Lee}, E.~M. and {Mathews}, J. and {Soderblom}, L. and
{Sucharski}, T. and {Torson}, J. and {McEwen}, A. and
{Robinson}, M.},
title = "{An Overview of the Integrated Software for Imaging
Spectrometers (ISIS)}",
booktitle = {Lunar and Planetary Science Conference},
year = 1997,
month = mar,
volume = 28,
pages = {387},
adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1997LPI....28..387G&db_key=AST},
adsnote = {Provided by the NASA Astrophysics Data System}
}
@unpublished{
ISIS_website,
author = {U.S. Geological Survey, Flagstaff, AZ},
title = {Integrated Software for Imagers and Spectrometers ({ISIS})},
url = {http://isis.astrogeology.usgs.gov/},
year= {2009},
note = ""
}
@unpublished{
HiRISE_website,
  author = {University of Arizona, Tucson},
title = {The High Resolution Imaging Science Experiment},
url = {http://hirise.lpl.arizona.edu/},
year= {2009},
note =""
}
% SPICE stuff
@INPROCEEDINGS{1999LPI....30.1233A,
author = {{Acton}, C.~H.},
title = "{SPICE Products Available to the Planetary Science Community}",
booktitle = {Lunar and Planetary Science XXX},
year = 1999,
month = mar,
number = {\#1233},
organization = {Lunar and Planetary Institute, Houston (CD-ROM)},
    adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1999LPI....30.1233A&db_key=AST},
adsnote = {Provided by the NASA Astrophysics Data System}
}
@INPROCEEDINGS{1999ficm.conf.6042A,
author = {{Acton}, C.~H. and {Bachman}, N.~J. and {Bytof}, J.~A. and
{Semenov}, B.~V. and {Taber}, W. and {Turner}, F.~S. and
{Wright}, E.~D.},
title = "{Examining {Mars} with SPICE}",
booktitle = {Fifth International Conference on Mars},
year = 1999,
month = jul,
number = {\#6042},
organization = {Lunar and Planetary Institute, Houston (CD-ROM)},
adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1999ficm.conf.6042A&db_key=AST},
adsnote = {Provided by the NASA Astrophysics Data System}
}
@ARTICLE{1996P&SS...44...65A,
author = {{Acton}, C.~H.},
title = "{Ancillary data services of NASA's Navigation and Ancillary
Information Facility}",
journal = {Planetary and Space Science},
year = 1996,
month = jan,
volume = 44,
pages = {65-70},
adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=1996P\%26SS...44...65A&db_key=AST},
adsnote = {Provided by the NASA Astrophysics Data System}
}
% HRSC Stuff:
@INCOLLECTION{2004mesp.book...17N,
author = {{Neukum}, G. and {Jaumann}, R.},
title = "{HRSC: the High Resolution Stereo Camera of Mars Express}",
booktitle = "Mars Express: the scientific payload",
editor = {{Wilson}, Andrew and {Chicarro}, Agustin},
publisher = "ESA Publications Division, Noordwijk, Netherlands",
number = {ESA SP-1240},
year = 2004,
month = aug,
pages = {17-35},
    adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2004mesp.book...17N&db_key=AST},
adsnote = {Provided by the Smithsonian/NASA Astrophysics Data System}
}
@INPROCEEDINGS{ ebner04,
  author = {Ebner H. and Spiegel M. and Albert B. and Bernd G. and Neukum G. et al.},
title = {{Improving The Exterior Orientation of Mars Express Hrsc Imagery}},
booktitle = {XXth ISPRS Congress, Commission IV},
year = 2004
}
% SLOG Stereo matching
@ARTICLE{nishihara05,
author = {{Nishihara}, H.K.},
title = {{Practical real-time imaging stereo matcher}},
journal = {Optical Engineering},
volume = 23,
number = 5,
year = 1984,
pages = {{536-545}}
}
% Current ASP
@INPROCEEDINGS{edwards06,
author = {{Edwards}, L. and {Broxton}, M.},
title = {{Automated 3D Surface Reconstruction from Orbital Imagery}},
booktitle = {Proceedings of AIAA Space 2006},
location = {San Jose, California},
month = {September},
year = 2006
}
% Stereo pipeline work for MER
@INPROCEEDINGS{edwards05,
author = {{Edwards}, L. and {Bowman}, J. and {Kunz}, C. and {Lees}, D. and {Sims}, M.},
title = {{Photo-realistic Terrain Modeling and Visualization for Mars Exploration Rover Science Operations}},
booktitle = {Proceedings of IEEE SMC 2005},
location = {Hawaii, USA},
month = {October},
year = 2005
}
% Viz
@ARTICLE{nguyen01,
author = {Nguyen, L., et al.},
title = {{Virtual reality interfaces for visualization and control of remote vehicles}},
journal = {Autonomous Robots},
volume = 11,
number = 1,
year = 2001
}
% Original Stereo Pipeline
@ARTICLE{stoker99,
author = {Stoker, C. et al.},
title = {{Analyzing Pathfinder data using virtual reality and superresolved imaging}},
journal = {Journal of Geophysical Research},
volume = 104,
number = {E4},
pages = {8889-8906},
month = {April},
year = 1999
}
@ARTICLE{gupta97,
author = {Gupta, Rajiv and Hartley, Richard I.},
title = {{Linear Pushbroom Cameras}},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
volume = 19,
number = 9,
month = {September},
year = 1997
}
% HRSC DTM Comparison
@INPROCEEDINGS{heipke06,
  author = {C. Heipke and J. Oberst et al.},
title = {{The HRSC DTM Test}},
booktitle = {Symposium of ISPRS Commission IV - Geo Spatial Databases for Sustainable Development},
year = 2006
}
% USGS analysis of MOC DTMs (some washboarding/DTM registration references)
@ARTICLE{kirk02,
author = {Kirk, R.L. and Soderblom, Laurence A. and
Howington-Kraus, Elipitha and Archinal, Brent},
title = {{USGS High-Resolution Topomapping of Mars with Mars Orbiter
Camera Narrow-Angle Images}},
journal = {IAPRS: GeoSpatial Theory, Processing and Applications},
volume = 34,
part = 4,
year = 2002
}
@INPROCEEDINGS{2002LPI....33.1845I,
author = {{Ivanov}, A.~B. and {Lorre}, J.~J.},
title = "{Analysis of Mars Orbiter Camera Stereo Pairs}",
booktitle = {Lunar and Planetary Institute Conference Abstracts},
year = 2002,
month = mar,
pages = {1845-+},
adsurl = {http://adsabs.harvard.edu/cgi-bin/nph-bib_query?bibcode=2002LPI....33.1845I&db_key=AST},
adsnote = {Provided by the Smithsonian/NASA Astrophysics Data System}
}
% SIFT
@ARTICLE{lowe04,
author = {Lowe, David G.},
title = {{Distinctive Image Features from Scale-Invariant Keypoints}},
journal = { International Journal of Computer Vision },
year = 2004
}
% RANSAC
@ARTICLE{ fischler81,
author = {Fischler, Martin A. and Bolles, Robert C.},
title = {{Random Sample Consensus: A Paradigm for Model Fitting with
Applications to Image Analysis and Automated
Cartography}},
journal = {Graphics and Image Processing},
volume = 24,
number = 6,
month = {June},
year = 1981
}
% Vision Workbench
@unpublished{
visionworkbench,
author = {NASA ARC Intelligent Systems Division},
title = {{NASA Vision Workbench}},
note = "NASA Ames Research Center, Moffett Field, CA. {\em http://ti.arc.nasa.gov/visionworkbench/}",
url = {http://ti.arc.nasa.gov/visionworkbench/}
}
@unpublished{ brady06,
author = {Brady, Tye},
title = {{ALHAT Requirements}},
note = "presentation at the Lunar Coordinate Systems Review Data Product Recommendation Meeting, NASA Ames Research Center, October 11.",
}
@string{ICCV = "International Conference on Computer Vision"}
@string{CVPR = "IEEE Computer Vision and Pattern Recognition or CVPR."}
@inproceedings{Stein06:attenuating,
author = "Andrew Stein and Andres Huertas and Larry Matthies",
title = "Attenuating Stereo Pixel-Locking via Affine Window Adaptation",
booktitle = "IEEE International Conference on Robotics and Automation",
pages = "914 - 921",
month = "May",
year = "2006"
}
@ARTICLE{Sun02rectangular,
author="Sun, Changming",
title="Fast Stereo Matching Using Rectangular Subregioning and 3D Maximum-Surface Techniques",
journal="International Journal of Computer Vision",
year="2002",
month="Apr",
day="01",
volume="47",
number="1",
pages="99--117",
issn="1573-1405",
url="https://doi.org/10.1023/A:1014585622703"
}
@ARTICLE{Nishihara84practical,
author = {H.K. Nishihara},
title = {{PRISM: A Practical real-time imaging stereo matcher}},
journal = {{Optical Engineering}},
volume = {23},
number = {5},
year = {1984},
pages = {536-545}
}
@ARTICLE{Szeliski03sampling,
author = "Richard Szeliski and Daniel Scharstein",
title = {{Sampling the Disparity Space Image}},
journal = {{IEEE Transactions on Pattern Analysis and Machine Intelligence (PAMI)}},
year = "2003",
volume = "26",
pages = "419 - 425"
}
@inproceedings{robinson05:lroc,
author = "M.S. Robinson and E.M. Eliason and H. Hiesinger and
B.L. Jolliff and A.S. McEwen and M.C. Malin and
M.A. Ravine and D. Roberts and P.C. Thomas and
E.P. Turtle",
  title = {{LROC - Lunar Reconnaissance Orbiter Camera}},
booktitle = "Proc of the Lunar and Planetary Science Conference (LPSC) XXXVI",
pages = "1576",
month = "March",
year = "2005"
}
@inproceedings{anderson08:isis,
author = "J.A. Anderson",
title = {{ISIS Camera Model Design}},
booktitle = "Proc of the Lunar and Planetary Science Conference (LPSC) XXXIX",
pages = "2159",
month = "March",
year = "2008"
}
@inproceedings{lawrence08:apollo,
author = "S. J. Lawrence and M. S. Robinson and M. Broxton and
J. D. Stopar and W. Close and J. Grunsfeld and
R. Ingram and L. Jefferson and S. Locke and
R. Mitchell and T. Scarsella and M. White and
               M. A. Hager and T. R. Watters and E. Bowman-Cisneros
and J. Danton and J. Garvin",
title = {{The Apollo Digital Image Archive: New Research and Data Products}},
booktitle = "Proc of the NLSI Lunar Science Conference",
pages = "2066",
year = "2008"
}
@ARTICLE{Baker04:lucas-kanade,
author="Baker, Simon and Matthews, Iain",
title="Lucas-Kanade 20 Years On: A Unifying Framework",
journal="International Journal of Computer Vision",
year="2004",
month="Feb",
day="01",
volume="56",
number="3",
pages="221--255",
issn="1573-1405",
url="https://doi.org/10.1023/B:VISI.0000011205.11775.fd"
}
@phdthesis{Menard97:robust,
author = {Christian Menard},
title = {{Robust Stereo and Adaptive Matching in Correlation Scale-Space}},
school = {Institute of Automation, Vienna Institute of Technology (PRIP-TR-45)},
year = "1997",
month = "January"
}
@article{Nehab05:improved,
author = {Diego Nehab and Szymon Rusinkiewicz and James Davis},
title = {Improved Sub-pixel Stereo Correspondences through Symmetric Refinement},
journal ={Computer Vision, IEEE International Conference on},
volume = {1},
year = {2005},
issn = {1550-5499},
pages = {557-563},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
}
@ARTICLE{cheng04:bayesian,
title={Bayesian Stereo Matching},
author={ Li Cheng and Caelli, T.},
journal={Computer Vision and Pattern Recognition Workshop, 2004. CVPRW '04. Conference on},
year={2004},
month={June},
volume={},
number={},
pages={ 192-192},
ISSN={}, }
@inproceedings{nefian:bayes_em,
title={{A Bayesian Formulation for Subpixel Refinement in Stereo Orbital Imagery}},
author={ Ara V. Nefian and Kyle Husmann and Michael Broxton and Mattew D. Hancher and Michael Lundy},
booktitle = "to appear in the Proceedings of the 2009 IEEE International Conference on Image Processing",
year = "2009"
}
@inproceedings{broxton:isvc09,
title={{ 3D Lunar Terrain Reconstruction from Apollo Images }},
author={ Michael Broxton and Ara V. Nefian and Zachary Moratto and Taemin Kim and Michael Lundy and Aleksandr V. Segal },
booktitle = {{to appear in the Proceedings of the 5th International Symposium on Visual Computing}},
year = "2009"
}
@inproceedings{konolige:sparsesparse,
title = {Sparse Sparse Bundle Adjustment},
booktitle = {British Machine Vision Conference},
year = {2010},
month = {08/2010},
address = {Aberystwyth, Wales},
keywords = {computer vision, perception},
attachments = {http://www.willowgarage.com/sites/default/files/ssba.pdf},
author = {Kurt Konolige}
}
@article{cholmod,
author = {Chen, Yanqing and Davis, Timothy A. and Hager, William W. and Rajamanickam, Sivasankaran},
title = {Algorithm 887: CHOLMOD, Supernodal Sparse Cholesky Factorization and Update/Downdate},
journal = {ACM Trans. Math. Softw.},
issue_date = {October 2008},
volume = {35},
number = {3},
month = oct,
year = {2008},
issn = {0098-3500},
pages = {22:1--22:14},
articleno = {22},
numpages = {14},
url = {http://doi.acm.org/10.1145/1391989.1391995},
acmid = {1391995},
publisher = {ACM},
address = {New York, NY, USA},
keywords = {Cholesky factorization, linear equations, sparse matrices},
}
@misc{lunokhod:controlnetwork,
author = {Moratto, Zachary},
title = {Creating Control Networks and Bundle Adjusting with ISIS3},
journal = {Lunokhod},
type = {Blog},
number = {March 5},
year = {2012},
howpublished = {\url{http://lunokhod.org/?p=468}}
}
@misc{lunokhod:gcp,
author = {Moratto, Zachary},
title = {Making well registered DEMs with ISIS and Ames Stereo Pipeline},
journal = {Lunokhod},
type = {Blog},
number = {April 14},
year = {2012},
howpublished = {\url{http://lunokhod.org/?p=559}}
}
@unpublished{isis:documentation,
author = {USGS Astrogeology Science Center},
title = {{USGS ISIS Documentation}},
note = "Isis 3 Application Documentation {\em http://isis.astrogeology.usgs.gov/Application/index.html}",
url = {http://isis.astrogeology.usgs.gov/Application/index.html}
}
@misc{cgiar:srtm90m,
author = {The CGIAR Consortium for Spatial Information},
title = {{CGIAR-CSI SRTM 90m DEM Digital Elevation Database}},
url = {http://srtm.csi.cgiar.org}
}
@unpublished{digital-globe:samples,
author = {Digital Globe},
title = {{Satellite Imagery and Geospatial Information Products}},
note = "Digital Globe sample imagery {\em https://www.digitalglobe.com/samples}",
url = {https://www.digitalglobe.com/samples}
}
@unpublished{digital-globe:camera,
author = {Digital Globe},
title = {{Radiometric Use of WorldView 2 Imagery}},
note = "Description of the WV02 camera",
url = {https://dg-cms-uploads-production.s3.amazonaws.com/uploads/document/file/104/Radiometric_Use_of_WorldView-2_Imagery.pdf}
}
@misc{planetaryblog:vesta,
author = {Machacek, Daniel},
title = {{Images from the long-awaited Dawn Vesta data set}},
journal = {The Planetary Society},
type = {Guest Blog},
number = {November 29},
year = {2012},
howpublished = {\url{http://www.planetary.org/blogs/guest-blogs/20121129-machacek-dawn-vesta.html}}
}
% Multi-view triangulation
@unpublished{slabaugh2001optimal,
title={Optimal ray intersection for computing 3d points from n-view correspondences},
author={Slabaugh, Greg and Schafer, Ron and Livingston, Mark},
year={2001},
note={},
url={http://www.gregslabaugh.net/publications/opray.pdf}
}
% Lunar lambertian
@article{mcewen1991photometric,
title={Photometric functions for photoclinometry and other applications},
author={McEwen, Alfred S},
journal={Icarus},
volume={92},
number={2},
pages={298--311},
year={1991},
publisher={Elsevier}
}
@article{lohse2006derivation,
title={Derivation of planetary topography using multi-image shape-from-shading},
author={Lohse, Volker and Heipke, Christian and Kirk, Randolph L},
journal={Planetary and space science},
volume={54},
number={7},
pages={661--674},
year={2006},
publisher={Elsevier}
}
@article{abrams2002aster,
title={ASTER User Handbook, version 2},
author={Abrams, Michael and Hook, Simon and Ramachandran, Bhaskar},
journal={Jet Propulsion Laboratory},
volume={4800},
pages={135},
year={2002}
}
@article{girod2015improvement,
title={Improvement of DEM generation from ASTER images using satellite jitter estimation and open source implementation},
author={Girod, L and Nuth, C and K{\"a}{\"a}b, A},
journal={The International Archives of Photogrammetry, Remote Sensing and Spatial Information Sciences},
volume={40},
number={1},
pages={249},
year={2015},
publisher={Copernicus GmbH}
}
%% This BibTeX bibliography file was created using BibDesk.
%% http://bibdesk.sourceforge.net/
%% Created for Ross Beyer at 2014-03-24 17:05:49 -0700
%% Saved with string encoding Unicode (UTF-8)
@article{2012P&SS...71...64D,
Adsnote = {Provided by the SAO/NASA Astrophysics Data System},
Adsurl = {http://adsabs.harvard.edu/abs/2012P%26SS...71...64D},
Author = {{Debei}, S. and {Aboudan}, A. and {Colombatti}, G. and {Pertile}, M.},
Date-Added = {2014-03-25 00:05:00 +0000},
Date-Modified = {2014-03-25 00:05:10 +0000},
Journal = {Planetary and Space Science},
Month = oct,
Pages = {64-72},
Title = {{Lutetia surface reconstruction and uncertainty analysis}},
Volume = 71,
Year = 2012,
Bdsk-Url-1 = {http://dx.doi.org/10.1016/j.pss.2012.07.013}
}
@inproceedings{Kunz:2010zr,
Annote = {References Broxton et al., but doesn't really use ASP.},
Author = {Kunz, C. and Singh, H.},
Booktitle = {Autonomous Underwater Vehicles (AUV), 2010 IEEE/OES},
Date-Added = {2013-04-26 19:59:28 +0000},
Date-Modified = {2013-04-26 19:59:32 +0000},
Issn = {1522-3167},
Keywords = {SLAM (robots);calibration;distance measurement;image matching;mobile robots;pose estimation;remotely operated vehicles;robot vision;stereo image processing;underwater vehicles;AUV mapping system;SeaBED AUV;camera calibration;frame-to-frame visual feature matching;full 3D structure recovering;odometry-based pose estimation;on-board vehicle navigation sensor;robot;seafloor mapping;simultaneous localization and mapping;stereo calibration;stereo self-calibration;stereo vision;stereo visual odometry;structure from motion;subpixel stereo correspondence estimation;visual maps;Calibration;Cameras;Image reconstruction;Navigation;Sea measurements;Vehicles;Visualization},
Pages = {1-7},
Title = {Stereo self-calibration for seafloor mapping using AUVs},
Year = {2010},
Bdsk-Url-1 = {http://dx.doi.org/10.1109/AUV.2010.5779655}}
@article{Kim20092095,
Annote = {Mentions, but does not use ASP.},
Author = {J.R. Kim and J.-P. Muller},
Date-Added = {2013-04-26 19:03:21 +0000},
Date-Modified = {2013-04-26 19:03:26 +0000},
Issn = {0032-0633},
Journal = {Planetary and Space Science},
Keywords = {Slope},
Number = {14--15},
Pages = {2095 - 2112},
Title = {Multi-resolution topographic data extraction from Martian stereo imagery},
Url = {http://www.sciencedirect.com/science/article/pii/S0032063309002888},
Volume = {57},
Year = {2009},
Bdsk-Url-1 = {http://www.sciencedirect.com/science/article/pii/S0032063309002888},
Bdsk-Url-2 = {http://dx.doi.org/10.1016/j.pss.2009.09.024}}
@inproceedings{Schwendner:vn,
Annote = {Doesn't actually use ASP, just mentions it as an example.},
Author = {Schwendner, Jakob and Hidalgo, Javier},
Booktitle = {International Symposium on Artificial Intelligence, Robotics and Automation in Space (i-SAIRAS)},
Date-Added = {2013-04-26 18:58:34 +0000},
Date-Modified = {2013-04-26 19:01:09 +0000},
Month = {September},
	Title = {Terrain Aided Navigation for Planetary Exploration Missions},
Year = {2012}}
@article{Debei201264,
Annote = {ASP is mentioned as an example, but I don't think this text is 'using' ASP for their results.},
Author = {Stefano Debei and Alessio Aboudan and Giacomo Colombatti and Marco Pertile},
Date-Added = {2013-04-26 18:54:17 +0000},
Date-Modified = {2013-04-26 18:54:26 +0000},
Issn = {0032-0633},
Journal = {Planetary and Space Science},
Keywords = {Uncertainty},
Number = {1},
Pages = {64 - 72},
Title = {Lutetia surface reconstruction and uncertainty analysis},
Url = {http://www.sciencedirect.com/science/article/pii/S0032063312002073},
Volume = {71},
Year = {2012},
Bdsk-Url-1 = {http://www.sciencedirect.com/science/article/pii/S0032063312002073},
Bdsk-Url-2 = {http://dx.doi.org/10.1016/j.pss.2012.07.013}}
@article{Li:2011fk,
Annote = {Doesn't really "use" ASP, just obliquely talks about it.},
Author = {Rongxing Li and Juwon Hwangbo and Yunhang Chen and Kaichang Di},
Date-Added = {2013-04-26 18:23:29 +0000},
Date-Modified = {2013-04-26 18:24:06 +0000},
Issn = {0196-2892},
Journal = {Geoscience and Remote Sensing, IEEE Transactions on},
Number = {7},
Pages = {2558-2572},
Title = {Rigorous Photogrammetric Processing of HiRISE Stereo Imagery for Mars Topographic Mapping},
Volume = {49},
Year = {2011},
Bdsk-Url-1 = {http://dx.doi.org/10.1109/TGRS.2011.2107522}
}
@article{hirschmuller_sgm_original,
title={Stereo Processing by Semiglobal Matching and Mutual Information},
author={Hirschm{\"u}ller, Heiko},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
volume={30},
pages={328--341},
year={2008},
publisher={IEEE Computer Society}
}
@article{hirschmuller_hrsc_with_sgm,
title={Stereo processing of HRSC Mars Express images by semi-global matching},
author={Hirschm{\"u}ller, Heiko and Mayer, Helmut and Neukum, G and others},
journal={Int. Arch. Photogramm. Remote Sensing Spatial Inf. Sci},
volume={36},
pages={305--310},
year={2006}
}
@inproceedings{xiang_2016_low_complexity_fsgm,
title={Low complexity optical flow using neighbor-guided semi-global matching},
author={Xiang, Jiang and Li, Ziyun and Blaauw, David and Kim, Hun Seok and Chakrabarti, Chaitali},
booktitle={2016 IEEE International Conference on Image Processing (ICIP)},
pages={4483--4487},
year={2016},
organization={IEEE}
}
@inproceedings{rothermel2012sure_isgm,
title={SURE: Photogrammetric surface reconstruction from imagery},
author={Rothermel, Mathias and Wenzel, Konrad and Fritsch, Dieter and Haala, Norbert},
booktitle={Proceedings LC3D Workshop, Berlin},
volume={8},
year={2012}
}
@article{hapke2008bidirectional,
title={Bidirectional reflectance spectroscopy: 6. Effects of porosity},
author={Hapke, Bruce},
journal={Icarus},
volume={195},
number={2},
pages={918--926},
year={2008},
publisher={Elsevier}
}
@article{hapke1993opposition,
title={The opposition effect of the moon: The contribution of coherent backscatter},
author={Hapke, Bruce W and Nelson, Robert M and Smythe, William D},
journal={Science},
volume={260},
number={5107},
pages={509--511},
year={1993}
}
@article{johnson2006spectrophotometric,
title={Spectrophotometric properties of materials observed by Pancam on the Mars Exploration Rovers: 1. Spirit},
author={Johnson, Jeffrey R and Grundy, William M and Lemmon, Mark T and Bell, James F and Johnson, Miles J and Deen, Robert G and Arvidson, Raymond E and Farrand, William H and Guinness, Edward A and Hayes, Alexander G and others},
journal={Journal of Geophysical Research: Planets},
volume={111},
number={E2},
year={2006},
publisher={Wiley Online Library}
}
@article{fernando2013surface,
title={Surface reflectance of Mars observed by CRISM/MRO: 2. Estimation of surface photometric properties in Gusev Crater and Meridiani Planum},
author={Fernando, J and Schmidt, F and Ceamanos, X and Pinet, P and Dout{\'e}, S and Daydou, Y},
journal={Journal of Geophysical Research: Planets},
volume={118},
number={3},
pages={534--559},
year={2013},
publisher={Wiley Online Library}
}
@inproceedings{smith2011results,
title={Results from the Lunar Orbiter Laser Altimeter (LOLA): global, high resolution topographic mapping of the Moon},
author={Smith, DE and Zuber, MT and Neumann, GA and Mazarico, E and Head, J and Torrence, MH and others},
booktitle={Lunar and Planetary Science Conference},
volume={42},
pages={2350},
year={2011}
}
@inproceedings{zabih1994census,
title={Non-parametric local transforms for computing visual correspondence},
author={Zabih, Ramin and Woodfill, John},
booktitle={European conference on computer vision},
pages={151--158},
year={1994},
organization={Springer}
}
@article{hua2016texture,
title={Texture-Aware Dense Image Matching Using Ternary Census Transform},
author={Hua, Han and Chenb, Chongtai and Wua, Bo and Yangc, Xiaoxia and Zhub, Qing and Dingb, Yulin},
journal={ISPRS Annals of Photogrammetry, Remote Sensing and Spatial Information Sciences},
pages={59--66},
year={2016}
}
@inproceedings{facciolo2015mgm,
title={Mgm: A significantly more global matching for stereovision},
author={Facciolo, Gabriele and De Franchis, Carlo and Meinhardt, Enric},
booktitle={Proceedings of the British Machine Vision Conference (BMVC), BMVA Press},
pages={90--1},
year={2015}
}
@article{asp2018,
author = {Beyer, Ross A. and Alexandrov, Oleg and McMichael, Scott},
title = {The {Ames Stereo Pipeline}: {NASA}'s Open Source Software for Deriving and Processing Terrain Data},
journal = {Earth and Space Science},
volume = {},
number = {},
pages = {}
}
@article{guizar2008efficient,
title={Efficient subpixel image registration algorithms},
author={Guizar-Sicairos, Manuel and Thurman, Samuel T and Fienup, James R},
journal={Optics letters},
volume={33},
number={2},
pages={156--158},
year={2008},
publisher={Optical Society of America}
}
@inproceedings{toldo2010global,
title={Global registration of multiple point clouds embedding the generalized procrustes analysis into an ICP framework},
author={Toldo, Roberto and Beinat, Alberto and Crosilla, Fabio},
booktitle={Proc. 3DPVT},
pages={109--122},
year={2010}
}
@article{sohn2004mathematical,
title={Mathematical modelling of historical reconnaissance CORONA KH-4B imagery},
author={Sohn, Hong-Gyoo and Kim, Gi-Hong and Yom, Jae-Hong},
journal={The Photogrammetric Record},
volume={19},
number={105},
pages={51--66},
year={2004},
publisher={Wiley Online Library}
}
@inproceedings{schenk2003rigorous,
title={Rigorous panoramic camera model for DISP imagery},
author={Schenk, Toni and Csatho, Beata and Shin, Sung Woong},
booktitle={Proceedings of the ISPRS Workshop: High Resolution Mapping from Space},
year={2003}
}
@inproceedings{hare2017community,
title={Community Sensor Model standard for the planetary domain},
author={Hare, TM and Kirk, RL},
booktitle={Lunar and Planetary Science Conference},
volume={48},
year={2017}
}
@article{tsai1987,
author={R. {Tsai}},
journal={IEEE Journal on Robotics and Automation},
title={A versatile camera calibration technique for high-accuracy 3D machine vision metrology using off-the-shelf TV cameras and lenses},
year={1987},
volume={3},
number={4},
pages={323-344},
keywords={Calibration;Machine vision;Measurement;Cameras;Calibration;Machine vision;Metrology;TV;Lenses;Robot vision systems;Robotic assembly;Robot kinematics;Application software},
doi={10.1109/JRA.1987.1087109},
ISSN={2374-8710},
month={August}
}
@article{brown1971,
author={Brown, D. C.},
journal={Photogrammetric Engineering},
title={Close-Range Camera Calibration},
year=1971,
volume=37,
number=8,
pages={855-866}
}
@article{brown1966,
author={Brown, D. C.},
journal={Photogrammetric Engineering},
title={Decentering Distortion of Lenses},
year=1966,
volume=32,
number=3,
pages={444-462}
}
@article{2019EA000713,
author = {Laura, J.R. and Mapel, J. and Hare, T.},
title = {Planetary Sensor Models Interoperability using the Community Sensor Model Specification},
year = 2020,
journal = {Earth and Space Science},
pages = {e2019EA000713},
doi = {10.1029/2019EA000713},
url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/2019EA000713},
eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/2019EA000713},
note = {e2019EA000713 2019EA000713}
}
@techreport{CSMTRD,
author = {Community Sensor Model Working Group},
title = {{Community Sensor Model (CSM) Technical Requirements Document (TRD)}},
institution = {National Geospatial Intelligence Agency},
year = 2007,
url = {https://gwg.nga.mil/documents/csmwg/CSM_TRD__Version_2.A%20Revision_2007_Aug_01__for_DISR.pdf}
}
@INPROCEEDINGS{2015LPI462703B,
author = {{Becker}, K.~J. and {Archinal}, B.~A. and {Hare}, T.~H. and
{Kirk}, R.~L. and {Howington-Kraus}, E. and {Robinson}, M.~S. and
{Rosiek}, M.~R.},
title = "{Criteria for Automated Identification of Stereo Image Pairs}",
booktitle = {Lunar and Planetary Science Conference},
year = 2015,
series = {Lunar and Planetary Science Conference},
month = mar,
pages = {2703},
url = {https://ui.adsabs.harvard.edu/abs/2015LPI....46.2703B},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
@article{thormahlen1985refractive,
title={Refractive index of water and its dependence on wavelength, temperature, and density},
author={Thorm{\"a}hlen, I and Straub, Johannes and Grigull, Ulrich},
journal={Journal of physical and chemical reference data},
volume={14},
number={4},
pages={933--945},
year={1985},
  publisher={American Institute of Physics for the National Institute of Standards and Technology}
}
@article{harvey1998calibration,
title={Calibration stability of an underwater stereo-video system: implications for measurement accuracy and precision},
author={Harvey, Evan S and Shortis, Mark R},
journal={Marine Technology Society Journal},
volume={32},
number={2},
pages={3--17},
year={1998},
publisher={Washington, DC: Marine Technology Society}
}
@article{austin1976index,
title={The index of refraction of seawater, 121 pp},
author={Austin, RW and Halikas, G},
  journal={Scripps Inst. of Oceanogr., La Jolla, Calif},
year={1976}
}
@book{mobley1995optical,
title={The optical properties of water. Handbook of Optics, ed Bass M},
author={Mobley, CD},
year={1995},
  publisher={McGraw-Hill, New York}
}
@book{jerlov1976marine,
title={Marine optics},
author={Jerlov, Nils Gunnar},
year={1976},
publisher={Elsevier}
}
@article{tukey1977exploratory,
  title={Exploratory Data Analysis Addison-Wesley},
author={Tukey, John W},
journal={Reading, Ma},
volume={688},
year={1977}
}
@article{buades2015reliable,
title={Reliable multiscale and multiwindow stereo matching},
author={Buades, Antoni and Facciolo, Gabriele},
journal={SIAM Journal on Imaging Sciences},
volume={8},
number={2},
pages={888--915},
year={2015},
publisher={SIAM}
}
@INPROCEEDINGS{Geiger2010ACCV,
author = {Andreas Geiger and Martin Roser and Raquel Urtasun},
title = {Efficient Large-Scale Stereo Matching},
booktitle = {Asian Conference on Computer Vision (ACCV)},
year = {2010}
}
@inproceedings{de2014automatic,
title={An automatic and modular stereo pipeline for pushbroom images},
author={De Franchis, Carlo and Meinhardt-Llopis, Enric and Michel, Julien and Morel, Jean-Michel and Facciolo, Gabriele},
booktitle={ISPRS Annals of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
year={2014}
}
@article{tao2018massive,
title={Massive stereo-based DTM production for Mars on cloud computers},
author={Tao, Y and Muller, J-P and Sidiropoulos, P and Xiong, Si-Ting and Putri, ARD and Walter, SHG and Veitch-Michaelis, J and Yershov, V},
journal={Planetary and Space Science},
volume={154},
pages={30--58},
year={2018},
publisher={Elsevier}
}
@article{nugent1966velocity,
title={Velocity aberration and atmospheric refraction in satellite laser communication experiments},
author={Nugent, LJ and Condon, RJ},
journal={Applied optics},
volume={5},
number={11},
pages={1832--1837},
year={1966},
publisher={Optical Society of America}
}
@article{bhushan2021automated,
title={Automated digital elevation model (DEM) generation from very-high-resolution Planet SkySat triplet stereo and video imagery},
author={Bhushan, Shashank and Shean, David and Alexandrov, Oleg and Henderson, Scott},
journal={ISPRS Journal of Photogrammetry and Remote Sensing},
volume={173},
pages={151--165},
year={2021},
publisher={Elsevier}
}
@article{palaseanu2021bathy,
title = {Open source satellite derived bathymetry module for NASA Ames Stereo Pipeline},
author = {Palaseanu, Monica and Alexandrov, Oleg and Danielson, Jeff},
journal = {Earth and Space Science Open Archive},
pages = {4},
year = {2021},
doi = {10.1002/essoar.10509185.1},
url = {https://doi.org/10.1002/essoar.10509185.1}
}
@article{alexandrov2018multiview,
title={Multiview Shape-From-Shading for Planetary Images},
author={Alexandrov, Oleg and Beyer, Ross A},
journal={Earth and Space Science},
volume={5},
number={10},
pages={652--666},
year={2018},
publisher={Wiley Online Library}
}
@article{tao2016optimised,
title={An optimised system for generating multi-resolution DTMS using NASA DTMS datasets},
author={Tao, Y and Muller, JP and Sidiropoulos, P and Veitch-Michaelis, J and Yershov, V},
journal={ISPRS-International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences},
volume={41},
pages={115--121},
year={2016},
publisher={Copernicus GmbH}
}
@article{sidiropoulos2018automatic,
title={Automatic coregistration and orthorectification (ACRO) and subsequent mosaicing of NASA high-resolution imagery over the Mars MC11 quadrangle, using HRSC as a baseline},
author={Sidiropoulos, Panagiotis and Muller, Jan-Peter and Watson, Gillian and Michael, Gregory and Walter, Sebastian},
journal={Planetary and Space Science},
volume={151},
pages={33--42},
year={2018},
publisher={Elsevier}
}
@article{remy2002gstl,
title={GsTL: The geostatistical template library in C++},
author={Remy, Nicolas and Shtuka, Arben and Levy, Bruno and Caers, Jef},
journal={Computers \& Geosciences},
volume={28},
number={8},
pages={971--979},
year={2002},
publisher={Elsevier}
}
@article{ohman2015procedure,
title={Procedure for processing LRO WAC monochromatic images with ISIS 3 for photogeologic purposes},
author={{\"O}hman, Teemu},
url = {https://www.lpi.usra.edu/lunar/tools/dems/Ohman_mono-WAC_ISIS_procedure_v1-1.pdf},
journal={Lunar and Planetary Institute},
year={2015}
}
@article{kirk2016semi,
title={A semi-rigorous sensor model for precision geometric processing of mini-RF bistatic radar images of the moon},
author={Kirk, RL and Barrett, JM and Wahl, DE and Erteza, I and Jackowatz, CV and Yocky, DA and Turner, S and Bussey, DBJ and Paterson, GW},
journal={The International Archives of Photogrammetry, Remote Sensing and Spatial Information Sciences},
volume={41},
pages={425},
year={2016},
publisher={Copernicus GmbH}
}
@inproceedings{kirk2011radargrammetric,
title={Radargrammetric Analysis of Mini-RF Lunar Images},
author={Kirk, RL and Howington-Kraus, E and Becker, TL and Cook, D and Barrett, JM and Neish, CD and Thomson, BJ and Bussey, DBJ},
booktitle={EPSC-DPS Joint Meeting 2011},
volume={2011},
pages={1473},
year={2011}
}
@article{shin2012progressively,
title={Progressively weighted affine adaptive correlation matching for quasi-dense 3D reconstruction},
author={Shin, Dongjoe and Muller, Jan-Peter},
journal={Pattern Recognition},
volume={45},
number={10},
pages={3795--3809},
year={2012},
publisher={Elsevier}
}
@article{otto1989region,
  title={‘Region-growing’ algorithm for matching of terrain images},
author={Otto, G Paul and Chau, Tony KW},
journal={Image and vision computing},
volume={7},
number={2},
pages={83--94},
year={1989},
publisher={Elsevier}
}
@article{bertone2023highly,
title={Highly Resolved Topography and Illumination at Mercury’s South Pole from MESSENGER MDIS NAC},
author={Bertone, Stefano and Mazarico, Erwan and Barker, Michael K and Siegler, Matthew A and Martinez-Camacho, Jose M and Hamill, Colin D and Glantzberg, Allison K and Chabot, Nancy L},
journal={The Planetary Science Journal},
volume={4},
number={2},
pages={21},
year={2023},
publisher={IOP Publishing}
}
@article{caravaca20203d,
title={3D digital outcrop model reconstruction of the Kimberley outcrop (Gale crater, Mars) and its integration into Virtual Reality for simulated geological analysis},
author={Caravaca, Gw{\'e}na{\"e}l and Le Mou{\'e}lic, St{\'e}phane and Mangold, Nicolas and L’Haridon, Jonas and Le Deit, Laetitia and Mass{\'e}, Marion},
journal={Planetary and Space Science},
volume={182},
pages={104808},
year={2020},
publisher={Elsevier}
}
@Article{palaseanu2023,
AUTHOR = {Palaseanu-Lovejoy, Monica and Alexandrov, Oleg and Danielson, Jeff and Storlazzi, Curt},
TITLE = {SaTSeaD: Satellite Triangulated Sea Depth Open-Source Bathymetry Module for NASA Ames Stereo Pipeline},
JOURNAL = {Remote Sensing},
VOLUME = {15},
YEAR = {2023},
NUMBER = {16},
ARTICLE-NUMBER = {3950},
URL = {https://www.mdpi.com/2072-4292/15/16/3950},
ISSN = {2072-4292},
ABSTRACT = {},
DOI = {10.3390/rs15163950}
}
@article{jakkula2010efficient,
title={Efficient feature detection using OBAloG: optimized box approximation of Laplacian of Gaussian},
JOURNAL = {Master thesis, Kansas State University},
author={Jakkula, Vinayak Reddy},
year={2010}
}
@article{jindal2024measuring_v2,
title={Measuring erosional and depositional patterns across Comet 67P's Imhotep region},
author={Jindal, AS and Birch, SPD and Hayes, AG and {\"O}zyurt, FP and Issah, AB and Moruzzi, SA and Barrington, MN and Soderblom, JM and Kirk, RL and Marschall, R and others},
journal={Journal of Geophysical Research: Planets},
volume={129},
number={2},
pages={e2023JE008089},
year={2024},
publisher={Wiley Online Library},
doi = {https://doi.org/10.1029/2023JE008089}
}
@article{Miclea_subpixel,
author={Miclea, Vlad-Cristian and Vancea, Cristian-Cosmin and Nedevschi, Sergiu},
journal={2015 IEEE International Conference on Intelligent Computer Communication and Processing (ICCP)},
title={New sub-pixel interpolation functions for accurate real-time stereo-matching algorithms},
year={2015},
volume={},
number={},
pages={173-178},
keywords={Interpolation;Histograms;Accuracy;Real-time systems;Cameras;Approximation algorithms;Pipelines;SGM;Sub-pixel accuracy;Function fitting;Interpolation},
doi={10.1109/ICCP.2015.7312625}}
@article{lesage2021constraints,
title={Constraints on effusive cryovolcanic eruptions on Europa using topography obtained from Galileo images},
author={Lesage, Elodie and Schmidt, Fr{\'e}d{\'e}ric and Andrieu, Fran{\c{c}}ois and Massol, H{\'e}l{\`e}ne},
journal={Icarus},
volume={361},
pages={114373},
year={2021},
publisher={Elsevier}
}
@article{nuth2011co,
title={Co-registration and bias corrections of satellite elevation data sets for quantifying glacier thickness change},
author={Nuth, Christopher and K{\"a}{\"a}b, Andreas},
journal={The Cryosphere},
volume={5},
number={1},
pages={271--290},
year={2011},
publisher={Copernicus GmbH}
}
@article{goossens2020high,
title={High-resolution gravity field models from GRAIL data and implications for models of the density structure of the Moon's crust},
author={Goossens, S and Sabaka, TJ and Wieczorek, MA and Neumann, GA and Mazarico, E and Lemoine, FG and Nicholas, JB and Smith, DE and Zuber, MT},
journal={Journal of Geophysical Research: Planets},
volume={125},
number={2},
pages={e2019JE006086},
year={2020},
publisher={Wiley Online Library}
}
@article{ghuffar2022pipeline,
title={A pipeline for automated processing of declassified Corona KH-4 (1962--1972) stereo imagery},
author={Ghuffar, Sajid and Bolch, Tobias and Rupnik, Ewelina and Bhattacharya, Atanu},
journal={IEEE Transactions on Geoscience and Remote Sensing},
volume={60},
pages={1--14},
year={2022},
publisher={IEEE}
}
@article{boatwright2024sfs,
title={SfS-Refined Digital Elevation Models of Malapert Massif and Mons Agnes, Moon},
author={Boatwright, Benjamin},
journal={Harvard Dataverse dataset},
pages={289},
year={2024}
}
@article{hemmi2025lroc,
title={LROC NAC-derived Meter-scale Topography of the Moon's South Polar Landing Sites: Digital Terrain Models and Their Quality Assessments},
author={Hemmi, Ryodo and Inoue, Hiroka and Kikuchi, Hiroshi and Sato, Hiroyuki and Miyamoto, Hideaki and Otake, Hisashi and Yamamoto, Mitsuo},
journal={The Planetary Science Journal},
volume={6},
number={11},
year={2025},
publisher={The American Astronomical Society},
month={November},
note={Published 2025 November 14}
}
@article{quan1995empirical,
title={Empirical equation for the index of refraction of seawater},
author={Quan, Xiaohong and Fry, Edward S},
journal={Applied optics},
volume={34},
number={18},
pages={3477--3480},
year={1995},
publisher={Optical Society of America}
}
================================================
FILE: docs/building_asp.rst
================================================
.. _building_asp:
Building and releasing ASP
==========================
This chapter will describe how ASP can be built from source and with conda, how
to build the documentation, and how to prepare a new ASP release. This is
focused towards the developer. Users should read instead the installation guide
in :numref:`installation`.
.. _build_from_source:
Building ASP from source
------------------------
All dependencies for the *latest development version* of ASP are available as
a `binary tarball
`_.
The dependencies for the latest stable version of ASP are in the
``stereopipeline-feedstock`` repository (:numref:`packages_to_build`).
Assume that all dependencies, including the development tools, are installed
in the ``asp_deps`` conda environment and the ``PATH`` variable is set up
to use them.
Create a work directory::
workDir=$HOME/build_asp
mkdir -p $workDir
Build VisionWorkbench and Stereo Pipeline version 3.6.0::
cd $workDir
envPath=$HOME/miniconda3/envs/asp_deps
$envPath/bin/git clone \
git@github.com:visionworkbench/visionworkbench.git
cd visionworkbench
# Build a specific version
git checkout 3.6.0
mkdir -p build
cd build
$envPath/bin/cmake .. \
-DASP_DEPS_DIR=$envPath \
-DCMAKE_INSTALL_PREFIX=$workDir/install \
make -j10 && make install
cd $workDir
envPath=$HOME/miniconda3/envs/asp_deps
$envPath/bin/git clone \
git@github.com:NeoGeographyToolkit/StereoPipeline.git
cd StereoPipeline
# Build a specific version
git checkout 3.6.0
mkdir -p build
cd build
$envPath/bin/cmake .. \
-DASP_DEPS_DIR=$envPath \
-DCMAKE_INSTALL_PREFIX=$workDir/install \
-DVISIONWORKBENCH_INSTALL_DIR=$workDir/install \
make -j10 && make install
Check if the compilers are picked up correctly.
.. _conda_build:
Building ASP and its dependencies with conda
--------------------------------------------
This page is meant for advanced users of ASP and maintainers who would
like to use conda to rebuild ASP and all its dependencies. It is
suggested to carefully read :numref:`conda_intro` before this page.
To simplify maintenance, ASP and its dependencies are built upon ISIS
and its dependencies. The process for this is outlined below.
Setting up the ISIS environment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Search for the latest available ISIS conda package::
conda search -c usgs-astrogeology --override-channels isis
Here it was found that ISIS version 9.0.0 was the latest, which we
will assume throughout the rest of this document.
Create a conda environment for this version of ISIS::
conda create -n isis9.0.0
conda activate isis9.0.0
Add these channels to conda::
conda config --env --add channels conda-forge
conda config --env --add channels usgs-astrogeology
Run::
conda config --show channels
and verify that ``usgs-astrogeology`` and ``conda-forge`` are in this
order and above all other channels, except perhaps the
``nasa-ames-stereo-pipeline`` channel.
Install the desired version of ISIS::
conda install \
-c usgs-astrogeology \
-c conda-forge \
-c defaults \
--channel-priority flexible \
isis==9.0.0
For ISIS 9.0.0, it appears that ensuring flexible channel priority is necessary
for successful installation.
Install the version of PDAL that is compatible with current ISIS
(may already exist as part of latest ISIS)::
conda install -c conda-forge --channel-priority flexible libpdal-core
Save the current environment for reference as follows::
conda env export > isis9.0.0.yaml
Note: As of 12/2025 any recent PDAL is incompatible with ISIS 9.0.0 and needs to
be built from source. Also, ISIS 9.0.0 is not available for Mac Arm. An
unofficial version of this (``9.0.0_asp``) is available in the
``nasa-ames-stereo-pipeline`` channel.
Fetching the build tools
~~~~~~~~~~~~~~~~~~~~~~~~
We will create a new ``tools`` environment to have all the tools we
will need. These could be appended to the earlier environment, but it
is less likely to have issues with dependency conflicts if these
are kept separate.
::
conda create -n tools
conda activate tools
conda install -c conda-forge anaconda-client conda-build
.. _packages_to_build:
Build recipe
~~~~~~~~~~~~
ASP has many dependencies that are source code, rather than pre-existing
packages.
The approach of producing a conda package for each turned out to be laborious,
because conda is slow and fragile. The latest approach is to build all these
packages and ASP itself in one single script, available at
https://github.com/NeoGeographyToolkit/stereopipeline-feedstock
To ensure this does not result in failures, the process is first tested
by building these manually, as done in the script, with the environment
specified there.
That environment is produced by adding dependencies to the installed ISIS
package.
The ASP version in this feedstock needs to be updated for each release.
Build command::
conda activate tools
conda config --set channel_priority flexible
conda build \
-c nasa-ames-stereo-pipeline \
-c usgs-astrogeology \
-c conda-forge \
stereopipeline-feedstock
The developers can upload the produced packages to the
``nasa-ames-stereo-pipeline`` channel.
After a package is uploaded, it can be installed in the desired environment as::
conda install \
-c nasa-ames-stereo-pipeline \
-c usgs-astrogeology \
-c conda-forge \
-n myEnv \
myPackage=myVersion=myBuildNo
If this is slow, check if the solver is set to ``libmamba``.
To list all packages in the channel, do::
conda search -c nasa-ames-stereo-pipeline --override-channels
To delete a package from this channel, run::
anaconda remove nasa-ames-stereo-pipeline/myPackage/myVersion
If adding an updated package with the same version, increment the build number.
Otherwise the new package may be confused with a cached version of a prior
build.
.. _helper_scripts:
Helper scripts
~~~~~~~~~~~~~~
The ``.github/workflows`` directory in the ``StereoPipeline`` repository has a
few scripts that show in detail the commands that are run to build ASP and its
dependencies.
.. _build_asp_doc:
Building the documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~
The ASP documentation is written in ReStructured Text and is built
with `Sphinx `_ and
`sphinxcontrib-bibtex `_.
See the `online ASP documentation
`_.
To build the documentation locally, install these packages such as::
conda create -n sphinx -c conda-forge sphinx sphinxcontrib-bibtex
conda activate sphinx
Note that we used a separate conda environment to minimize the chance
of conflict with other dependencies.
The ``docs`` directory contains the root of the documentation. Running there
``make html`` will create the HTML version of the documentation in the
``_build`` subdirectory.
Building the PDF documentation is no longer supported.
If the documentation builds well locally but fails to update on the web, see the
`cloud build status page
`_.
.. _asp_release_guide:
Releasing a new version of ASP
------------------------------
This is reading for ASP maintainers.
Update the version number
~~~~~~~~~~~~~~~~~~~~~~~~~
Inside *both* the VisionWorkbench and ASP repositories, edit
``src/CMakeLists.txt`` and set the new version, which should be the same for
both packages, and in the format ``x.y.z``. If the value there is
``x.y.z-alpha``, which is used to tag a pre-release, remove the ``-alpha`` part.
Increment one of these digits, depending on whether this is a major, minor, or
bugfix release. See https://semver.org for guidance.
Update the documentation
~~~~~~~~~~~~~~~~~~~~~~~~
Search all documentation for the old version number for ASP (such as 3.5.0) and
ISIS (such as 8.3.0) and replace it with the new version numbers (such as 3.6.0
and 9.0.0). This includes files in the base directory, not just in ``docs``.
Update NEWS.rst. Add the release date on top, along the lines of prior releases
(see further down in that file). This file must have a detailed log of all
changes, especially those that result in changed behavior or options, and it
should be incrementally updated as changes are made during development.
Update the copyright year in the README.rst and docs/conf.py files.
Commit and tag
~~~~~~~~~~~~~~
Commit all changes. Tag the release in *both* the VisionWorkbench and
StereoPipeline repos. Example::
git tag 3.6.0
git push origin 3.6.0 # commit to your branch
git push god 3.6.0 # commit to main branch
(Here it is assumed that ``origin`` points to your own fork and ``god``
points to the parent repository.)
If more commits were made and it is desired to apply this tag to a
different commit, first remove the existing tag with::
git tag -d 3.6.0
git push origin :refs/tags/3.6.0
git push god :refs/tags/3.6.0
Build ASP with conda
~~~~~~~~~~~~~~~~~~~~
See :numref:`conda_build`.
.. _build_binaries:
Building self-contained binaries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In addition to creating a conda package, it is also convenient to ship a
zipped package having all ASP tools and needed libraries (this includes the ISIS
libraries but not the ISIS tools).
Such a build is created for each release and also daily. These are posted on the
GitHub release page (:numref:`release`).
ASP uses a custom build system. It can be downloaded with ``git`` from:
https://github.com/NeoGeographyToolkit/BinaryBuilder
Create a conda environment that has the dependencies for building ASP, as
described in :numref:`build_from_source`. Assume it is called ``asp_deps``.
Install the C, C++, and Fortran compilers (same versions as for ISIS),
``cmake>=3.15.5``, ``pbzip2``, ``parallel``, and for Linux also the ``chrpath``
tool, as outlined on that page. The full list of dependencies is in the
``stereopipeline-feedstock`` repository (:numref:`packages_to_build`).
Go to the directory ``BinaryBuilder``, and run::
/path/to/python3 \
./build.py \
--cc \
--cxx \
--gfortran \
--asp-deps-dir $HOME/miniconda3/envs/asp_deps \
--build-root build_asp \
--skip-tests \
visionworkbench stereopipeline
This will fetch and build the latest VisionWorkbench and Stereo Pipeline in
``build_asp/build``, and will install them in ``build_asp/install``.
See :numref:`helper_scripts` for scripts illustrating this process.
Create a conda environment having Python and numpy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ISIS expects a full Python distribution to be shipped. To avoid shipping
the entire ``asp_deps`` environment, we create a separate environment
having only Python, numpy, with versions as expected by current ISIS.
Run, for example::
conda create -c conda-forge -n python_isis_v python=x.y.z numpy=a.b.c
Note that different versions of these may be needed for Linux and OSX.
The ``conda list`` command within the ``asp_deps`` environment
can be used to look up the desired versions.
Package the build
~~~~~~~~~~~~~~~~~
Run in ``BinaryBuilder`` the command::
/path/to/python3 \
./make-dist.py build_asp/install \
--asp-deps-dir $HOME/miniconda3/envs/asp_deps \
--python-env $HOME/miniconda3/envs/python_isis_v
Building and packaging should be done separately for Linux and OSX.
Test ASP
~~~~~~~~
The script ``auto_build/launch_master.sh`` in ``BinaryBuilder`` can be invoked
to build and test ASP. This script and also ``auto_build/utils.sh`` need to be
read carefully and some variables adjusted.
The `StereoPipeline test suite
`_ is run. It has
comprehensive tests for the ASP tools.
This functionality creates the daily builds, which are then
uploaded to the GitHub release page (:numref:`release`).
Prepare the documentation
~~~~~~~~~~~~~~~~~~~~~~~~~
Follow the instructions in :numref:`build_asp_doc`.
Push the release to GitHub
~~~~~~~~~~~~~~~~~~~~~~~~~~
Create a release on `GitHub
`_. Use the tag
for the current release. Add to the release notes a link to the appropriate
NEWS section of the documentation (:numref:`news`). *Only after this save
the release.*
*Do not delete and recreate the release* (:numref:`zenodo`). It is fine to
upload the binaries after a release is created, and delete and re-upload them.
The GitHub ``gh`` program can be invoked to push the binaries to the release.
Example::
cd BinaryBuilder/asp_tarballs
for file in StereoPipeline-3.6.0-2025-12-26-x86_64-Linux.tar.bz2 \
StereoPipeline-3.6.0-2025-12-26-x86_64-OSX.tar.bz2 \
StereoPipeline-3.6.0-2025-12-26-arm64-OSX.tar.bz2; do
gh release upload 3.6.0 $file \
-R git@github.com:NeoGeographyToolkit/StereoPipeline.git
done
Alternatively, these can be uploaded from a web browser.
.. _zenodo:
Zenodo link for the release
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Creating a release will trigger producing the Zenodo permanent link (after a few
minutes). That link cannot be changed, and the text it copies from the release
notes cannot be changed either.
It is very important to *create the release just once for the given tag*.
Otherwise, the Zenodo permanent link will always point to the earliest attempt
at making the release. It is fine to later overwrite the binaries for this
release, or even to upload them later. Just do not delete and recreate the
release page itself.
Do not just rename the latest automatically uploaded daily build, as that will
create an incorrect Zenodo link.
Wait for Zenodo to mint the link for this release, then visit the Zenodo page at
https://zenodo.org/badge/latestdoi/714891 (which will always point to the
'latest' DOI) and find there this release's URL. Put it at the top of
`README.rst
`_,
in the appropriate ``target`` field.
*Increment the version in the image field right above that.*
Add this link also to the NEWS.rst page, under the section name for the current
release.
Push this update to GitHub. The new commit will be after the tag for the
release, but that is not an issue. It is best to not change the tag after
the release and Zenodo link got created.
Announce the release
~~~~~~~~~~~~~~~~~~~~
Send an announcement of the new release to the `mailing list
`_, with
a link to the NEWS section for the current release from the documentation.
Post-release work
~~~~~~~~~~~~~~~~~
Update the version number in ``src/CMakeLists.txt`` in both the VisionWorkbench
and ASP repositories.
If version 3.6.0 just got released, we expect that the next feature release will
likely be 3.7.0. The version tag should be updated to 3.7.0-alpha in
anticipation in *both* the VisionWorkbench and ASP repositories.
See https://semver.org for guidance on versions.
================================================
FILE: docs/bundle_adjustment.rst
================================================
.. _bundle_adjustment:
Bundle adjustment
=================
Overview
--------
Satellite position and orientation errors have a direct effect on the
accuracy of digital elevation models produced by the Stereo Pipeline. If
they are not corrected, these uncertainties will result in systematic
errors in the overall position and slope of the DEM. Severe distortions
can occur as well, resulting in twisted or "taco-shaped" DEMs, though in
most cases these effects are quite subtle and hard to detect. In the
worst case, such as with old mission data like Voyager or Apollo, these
gross camera misalignments can inhibit Stereo Pipeline's internal
interest point matcher and block auto search range detection.
.. figure:: images/ba_orig_adjusted.png
Bundle adjustment is illustrated here using a color-mapped, hill-shaded
DEM mosaic from Apollo 15, Orbit 33, images. (a) Prior to bundle
adjustment, large discontinuities can exist between overlapping DEMs
made from different images. (b) After bundle adjustment, DEM alignment
errors are minimized and no longer visible.
Errors in camera position and orientation can be corrected using a
process called *bundle adjustment*. Bundle adjustment is the process of
simultaneously adjusting the properties of many cameras and the 3D
locations of the objects they see in order to minimize the error between
the estimated, back-projected pixel locations of the 3D objects and
their actual measured locations in the captured images. This is called
the *reprojection error*.
This complex process can be boiled down to this simple idea: bundle
adjustment ensures that the observations in multiple images of a single
ground feature are self-consistent. If they are not consistent, then the
position and orientation of the cameras as well as the 3D position of
the feature must be adjusted until they are. This optimization is
carried out along with thousands (or more) of similar constraints
involving many different features observed in other images. Bundle
adjustment is very powerful and versatile: it can operate on just two
overlapping images, or on thousands. It is also a dangerous tool.
Careful consideration is required to ensure and verify that the solution
does represent reality.
Bundle adjustment can also take advantage of GCPs (:numref:`bagcp`), which are
3D locations of features that are known a priori (often by measuring them by
hand in another existing DEM). GCPs can improve the internal consistency of your
DEM or align your DEM to an existing data product. Finally, even though bundle
adjustment calculates the locations of the 3D objects it views, only the final
properties of the cameras are recorded for use by the Ames Stereo Pipeline.
Those properties can be loaded into the ``parallel_stereo`` program which uses
its own method for triangulating 3D feature locations.
When using the Stereo Pipeline, bundle adjustment is an optional step
between the capture of images and the creation of DEMs. The bundle
adjustment process described below should be completed prior to running
the ``parallel_stereo`` command.
Although bundle adjustment is not a required step for generating DEMs,
it is *highly recommended* for users who plan to create DEMs for
scientific analysis and publication. Incorporating bundle adjustment
into the stereo work flow not only results in DEMs that are more
internally consistent, it is also the correct way to co-register your
DEMs with other existing data sets and geodetic control networks.
A DEM obtained after bundle adjustment and stereo may need to be aligned
to a known reference coordinate system. For that, use the ``pc_align``
tool (:numref:`pc_align`).
See the options ``--heights-from-dem`` (:numref:`heights_from_dem`)
and ``--reference-terrain`` further down for how to incorporate an
external DEM in bundle adjustment. Note that these can only locally
refine camera parameters, an initial alignment with ``pc_align`` is
still necessary.
Optimizing of camera intrinsics parameters, such as optical center,
focal length, and distortion is also possible, as seen below.
.. _baasp:
Running bundle adjustment
-------------------------
Stereo Pipeline provides the ``bundle_adjust`` program
(:numref:`bundle_adjust`).
Start by running ``parallel_stereo`` without using bundle-adjusted camera
models::
parallel_stereo AS15-M-1134.cub AS15-M-1135.cub run_noadjust/run
See :numref:`nextsteps` for how to improve the quality of stereo
correlation results (at the expense of speed).
Create a DEM and triangulation error image as in :numref:`point2dem`.
Run bundle adjustment::
bundle_adjust --camera-position-weight 0 \
--tri-weight 0.1 --tri-robust-threshold 0.1 \
AS15-M-1134.cub AS15-M-1135.cub -o run_ba/run
Here only camera positions and orientations are refined. How to optimize the
camera intrinsics (if applicable) is discussed further down
(:numref:`floatingintrinsics`).
Run ``parallel_stereo`` while using the bundle-adjusted camera models::
parallel_stereo AS15-M-1134.cub AS15-M-1135.cub \
--prev-run-prefix run_noadjust/run \
--bundle-adjust-prefix run_ba/run \
run_adjust/run
This should be followed, as before, by creation of a DEM and a triangulation
error image. Note the option ``--prev-run-prefix`` that allowed reusing
the previous run apart from the triangulation step. That speeds up the process,
and works well-enough unless the cameras change a lot.
.. figure:: images/MOC_tri_error.png
:name: moc_dem_intersection_errors
An unusually large intersection error (left), and the version after bundle
adjustment (right). Note that these do not use the same range of colors. The
images are produced with the MOC camera (:numref:`moc_example`). The
remaining wavy pattern is due to jitter, that ASP has a solver for
(:numref:`jitter_solve`). More illustrations are in
:numref:`ba_rpc_distortion` and :numref:`ba_frame_linescan_results`.
Bundle adjustment aims to make the cameras more self-consistent but offers no
guarantees about their absolute positions (unless GCP are used,
:numref:`bagcp`), in fact, the cameras can move away a lot sometimes. The
options ``--tri-weight``, ``--rotation-weight``, and
``--camera-position-weight`` can be used to constrain how much the cameras can
move during bundle adjustment. Note that large values for these may impact the
ability to make the cameras self-consistent.
This program can constrain the triangulated points, and hence the cameras,
relative to a DEM. This option only works when the cameras are already
rather well-aligned to this DEM and only fine-level adjustments are needed.
That is discussed in :numref:`heights_from_dem`.
ASP also offers the tool ``parallel_bundle_adjust`` which can create
match files using multiple processes spread over multiple machines
(:numref:`parallel_bundle_adjust`). These can also be used later
during stereo with the options ``--match-files-prefix`` and
``--clean-match-files-prefix``.
.. _floatingintrinsics:
Floating intrinsics and using a lidar or DEM ground truth
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section documents some advanced functionality. It is suggested that the
reader study it carefully and invest a certain amount of time to fully
take advantage of these concepts.
When the input cameras are of Pinhole type (:numref:`pinholemodels`), optical
bar (:numref:`panoramic`), or CSM (:numref:`csm`), it is possible to optimize
(float, refine) the intrinsic parameters (focal length, optical center,
distortion, with a somewhat different list for optical bar cameras), in addition
to the extrinsics.
It is also possible to take advantage of an existing terrain ground truth, such
as a lidar file or a DEM, to correct imperfectly calibrated intrinsic
parameters, which can result in greatly improved results, such as creating less
distorted DEMs that agree much better with the ground truth.
See :numref:`intrinsics_no_constraints` for how to optimize intrinsics with no
constraints, :numref:`intrinsics_ground_truth` for when ground constraints can
be used (there exist options for sparse ground points and a DEM), and
:numref:`kaguya_ba` for how to have several groups of intrinsics.
Mixing frame and linescan cameras is discussed in :numref:`ba_frame_linescan`.
.. _intrinsics_no_constraints:
A first attempt at floating the intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This section is *only an introduction* of how to float the intrinsics. Detailed
examples are further down.
It is very strongly suggested to ensure that a good number of images exists,
that they have a lot of overlap, and that the cameras have been already
bundle-adjusted with intrinsics fixed and aligned to a DEM
(:numref:`ba_pc_align`). Such a DEM should be used as a constraint.
Note that when solving for intrinsics, ``bundle_adjust`` will by default
optimize all intrinsic parameters and will share them across all cameras. This
behavior can be controlled with the ``--intrinsics-to-float`` and
``--intrinsics-to-share`` parameters, or in a finer-grained way, as shown in
:numref:`kaguya_ba`.
The first invocation of camera optimization should be with intrinsics fixed::
bundle_adjust -t nadirpinhole --inline-adjustments \
left.tif right.tif left.tsai right.tsai -o run_ba/run
Here two images have been used for illustration purposes, but a larger number
should be used in practice.
It is suggested that one run ``parallel_stereo`` with the obtained cameras::
parallel_stereo -t nadirpinhole --alignment-method epipolar \
--stereo-algorithm asp_mgm --subpixel-mode 9 \
left.tif right.tif run_ba/run-left.tsai run_ba/run-right.tsai \
run_stereo/run
followed by DEM creation (:numref:`point2dem`)::
point2dem --tr RESOLUTION --errorimage run_stereo/run-PC.tif
Then examine and plot the intersection error::
gdalinfo -stats run_stereo/run-IntersectionErr.tif
colormap run_stereo/run-IntersectionErr.tif
stereo_gui run_stereo/run-IntersectionErr_CMAP.tif
See :numref:`running-stereo` for other stereo algorithms. For ``colormap``
(:numref:`colormap`), ``--min`` and ``--max`` bounds can be specified if the
automatic range is too large.
We also suggest inspecting the interest points
(:numref:`stereo_gui_view_ip`)::
stereo_gui left.tif right.tif run_ba/run
and then viewing the interest points from the menu.
If the interest points are not well-distributed, this may result in large ray
intersection errors where they are missing. Then, one should delete the existing
run directory and create a better set, as discussed in :numref:`custom_ip`.
If the interest points are good and the mean intersection error is
acceptable, but this error shows an odd nonlinear pattern, that means
it may be necessary to optimize the intrinsics. We do so by using the
cameras with the optimized extrinsics found earlier. This is just an
early such attempt, better approaches will be suggested below::
bundle_adjust -t nadirpinhole --inline-adjustments \
--solve-intrinsics --camera-position-weight 0 \
--max-pairwise-matches 20000 \
left.tif right.tif \
run_ba/run-left.tsai run_ba/run-right.tsai \
-o run_ba_intr/run
See :numref:`heights_from_dem` for how to use a DEM as a constraint.
See :numref:`dense_ip` for how to create dense interest points.
Both of these are *very recommended*.
It is important to note that only the non-zero intrinsics will be
optimized, and the step size used in optimizing a certain intrinsic
parameter is proportional to it. Hence, if an intrinsic is 0 and it is
desired to optimize it, it should be set to small non-zero value
suggestive of its final estimated scale. If the algorithm fails to give
a good solution, perhaps different initial values for the intrinsics
should be tried. For example, one can try changing the sign of the
initial distortion coefficients, or make their values much smaller.
It is good to use a lens distortion model such as the one ASP calls
*Tsai* (:numref:`pinholemodels`), as then the distortion operation
is a simple formula, which is fast and convenient in bundle adjustment,
when projecting into the camera is the key operation. Using models
like *Photometrix* and *Brown-Conrady* is not advised.
Here we assumed all intrinsics are shared. See
:numref:`kaguya_ba` for how to have several groups of
intrinsics. See also the option ``--intrinsics-to-share``.
Sometimes the camera weight may need to be decreased, even all the way
to 0, if it appears that the solver is not aggressive enough, or it may
need to be increased if perhaps it overfits. This will become less of a
concern if there is some ground truth, as discussed later.
Next, one can run ``parallel_stereo`` as before, with the new cameras, and see
if the obtained solution is more acceptable, that is, if the intersection error
is smaller. It is good to note that a preliminary investigation can already be
made right after bundle adjustment, by looking at the residual error files
before and after bundle adjustment. They are in the ``bundle_adjust`` output
directory, with names::
initial_residuals_pointmap.csv
final_residuals_pointmap.csv
If desired, these csv files can be converted to a DEM with
``point2dem``, which can be invoked with::
--csv-format 1:lon,2:lat,4:height_above_datum
then one can look at their statistics, also have them colorized, and
viewed in ``stereo_gui`` (:numref:`plot_csv`).
This file also shows how often each feature is seen in the images, so,
if three images are present, hopefully many features will be seen three
times.
.. _intrinsics_ground_truth:
Using ground truth when floating the intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If a point cloud having ground truth, such as a DEM or lidar file
exists, say named ``ref.tif``, it can be used as part of bundle
adjustment. For that, the stereo DEM obtained earlier
needs to be first aligned to this ground truth, such as::
pc_align --max-displacement VAL \
run_stereo/run-DEM.tif ref.tif \
--save-inv-transformed-reference-points \
-o run_align/run
(see the manual page of this tool in :numref:`pc_align` for more details).
This alignment can then be applied to the cameras as well::
bundle_adjust -t nadirpinhole --inline-adjustments \
--initial-transform run_align/run-inverse-transform.txt \
left.tif right.tif run_ba/run-left.tsai run_ba/run-right.tsai \
--apply-initial-transform-only -o run_align/run
If ``pc_align`` is called with the clouds in reverse order (the denser
cloud should always be the first), when applying the transform to the
cameras in ``bundle_adjust`` one should use ``transform.txt`` instead of
``inverse-transform.txt`` above.
Note that if your ground truth is in CSV format, any tools that use this cloud
must set ``--csv-format`` and perhaps also ``--datum`` and/or ``--csv-srs``.
See :numref:`ba_pc_align` for how to handle the case when input
adjustments exist.
There are two ways of incorporating a ground constraint in bundle adjustment.
The first one assumes that the ground truth is a DEM, and is very easy to use
with a large number of images (:numref:`heights_from_dem`). A second approach
can be used when the ground truth is sparse (and with a DEM as well). This is a
bit more involved (:numref:`reference_terrain`).
.. _heights_from_dem:
Using the heights from a reference DEM
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In some situations the DEM obtained with ASP is, after alignment, quite similar
to a reference DEM, but the heights may be off. This can happen, for example,
if the focal length or lens distortion are not accurately known.
In this case it is possible to borrow more accurate information from the
reference DEM. The option for this is ``--heights-from-dem``. An additional
control is given, in the form of the option ``--heights-from-dem-uncertainty``
(1 sigma, in meters). The smaller its value is, the stronger the DEM constraint.
This value divides the difference between the triangulated points being
optimized and their initial value on the DEM when added to the cost function
(:numref:`how_ba_works`).
The option ``--heights-from-dem-robust-threshold`` ensures that these weighted
differences plateau at a certain level and do not dominate the problem. The
default value is 0.1, which is smaller than the ``--robust-threshold`` value of
0.5, which is used to control the pixel reprojection error, as that is given a
higher priority. It is suggested to not modify this threshold, and adjust
instead ``--heights-from-dem-uncertainty``.
If a triangulated point is not close to the reference DEM, bundle adjustment
falls back to the ``--tri-weight`` constraint.
Here is an example when we solve for intrinsics with a DEM constraint. As in the
earlier section, *we assume that the cameras and the terrain are already
aligned*::
bundle_adjust -t nadirpinhole \
--inline-adjustments \
--solve-intrinsics \
--intrinsics-to-float all \
--intrinsics-to-share all \
--camera-position-weight 0 \
--max-pairwise-matches 20000 \
--heights-from-dem dem.tif \
--heights-from-dem-uncertainty 10.0 \
--parameter-tolerance 1e-12 \
--remove-outliers-params "75.0 3.0 20 25" \
left.tif right.tif \
run_align/run-run-left.tsai \
run_align/run-run-right.tsai \
-o run_ba_hts_from_dem/run
One should be careful with setting ``--heights-from-dem-uncertainty``. Having
it larger will ensure it does not prevent convergence.
It is *strongly suggested* to use dense interest points (:numref:`dense_ip`), if
solving for intrinsics, and have ``--max-pairwise-matches`` large enough to not
throw some of them out. We set ``--camera-position-weight 0``, as hopefully the
DEM constraint is enough to constrain the solution.
Here we were rather generous with the parameters for removing
outliers, as the input DEM may not be that accurate, and tying
too closely to it could otherwise result in some valid matches being
flagged as outliers.
The implementation of ``--heights-from-dem`` is as follows. Rays from matching
interest points are intersected with this DEM, and the average of the produced
points is projected vertically onto the DEM. This is declared to be the
intersection point of the rays, and the triangulated points being optimized
are constrained via ``--heights-from-dem-uncertainty`` to be close to this
point.
It is important to note that this heuristic may not be accurate if the rays have
a large intersection error. But, since bundle adjustment usually has two passes,
at the second pass the improved cameras are used to recompute the point on the
DEM with better accuracy.
This option can be more effective than using ``--reference-terrain`` when there
is a large uncertainty in camera intrinsics.
See two other large-scale examples of using ``--heights-from-dem``, without
floating the intrinsics, in the SkySat processing example (:numref:`skysat`),
using Pinhole cameras, and with linescan Lunar images with variable illumination
(:numref:`sfs-lola`).
Here we assumed all intrinsics are shared. See :numref:`kaguya_ba` for how to
have several groups of intrinsics. See also the option
``--intrinsics-to-share``.
It is suggested to look at the documentation of all the options
above and adjust them for your use case.
See :numref:`bundle_adjust` for the documentation of all options
above, and :numref:`ba_out_files` for the output reports being saved,
which can help judge how well the optimization worked.
.. _reference_terrain:
Sparse ground truth and using the disparity
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Here we will discuss an approach that works when the ground truth can
be sparse, and we make use of the stereo disparity. It requires more
work to set up than the earlier one.
We will need to create a disparity from the left and right images
that we will use during bundle adjustment. For that we will take the
disparity obtained in stereo and remove any intermediate transforms
stereo applied to the images and the disparity. This can be done as
follows::
stereo_tri -t nadirpinhole --alignment-method epipolar \
--unalign-disparity \
left.tif right.tif \
run_ba/run-left.tsai run_ba/run-right.tsai \
run_stereo/run
and then bundle adjustment can be invoked with this disparity and the
DEM/lidar file. Note that we use the cameras obtained after alignment::
bundle_adjust -t nadirpinhole --inline-adjustments \
--solve-intrinsics --camera-position-weight 0 \
--max-disp-error 50 \
--max-num-reference-points 1000000 \
--max-pairwise-matches 20000 \
--parameter-tolerance 1e-12 \
--robust-threshold 2 \
--reference-terrain lidar.csv \
--reference-terrain-weight 5 \
--disparity-list run_stereo/run-unaligned-D.tif \
left.tif right.tif \
run_align/run-run-left.tsai run_align/run-run-right.tsai \
-o run_ba_intr_lidar/run
Here we set the camera weight all the way to 0, since it is hoped that
having a reference terrain is a sufficient constraint to prevent
over-fitting.
We used ``--robust-threshold 2`` to make the solver work harder
where the errors are larger. This may be increased somewhat if the
distortion is still not solved well in corners.
See the note earlier in the text about what a good lens distortion
model is.
This tool will write some residual files of the form::
initial_residuals_reference_terrain.txt
final_residuals_reference_terrain.txt
which may be studied to see if the error-to-lidar decreased. Each
residual is defined as the distance, in pixels, between a terrain point
projected into the left camera image and then transferred onto the right
image via the unaligned disparity and its direct projection into the
right camera.
If the initial errors in that file are large to start with, say more
than 2-3 pixels, there is a chance something is wrong. Either the
cameras are not well-aligned to each other or to the ground, or the
intrinsics are off too much. In that case it is possible the errors are
too large for this approach to reduce them effectively.
We strongly recommend that for this process one should not rely on
bundle adjustment to create interest points, but to use the dense and
uniformly distributed ones created with stereo (:numref:`dense_ip`).
The hope is that after these directions are followed, this will result
in a smaller intersection error and a smaller error to the lidar/DEM
ground truth (the latter can be evaluated by invoking
``geodiff --absolute`` on the ASP-created aligned DEM and the reference
lidar/DEM file).
Here we assumed all intrinsics are shared. See
:numref:`kaguya_ba` for how to have several groups of
intrinsics. See also the option ``--intrinsics-to-share``.
When the lidar file is large, in bundle adjustment one can use the flag
``--lon-lat-limit`` to read only a relevant portion of it. This can
speed up setting up the problem but does not affect the optimization.
Sparse ground truth and multiple images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Everything mentioned earlier works with more than two images, in fact,
having more images is highly desirable, and ideally the images overlap a
lot. For example, one can create stereo pairs consisting of first and
second images, second and third, third and fourth, etc., invoke the
above logic for each pair, that is, run stereo, alignment to the ground
truth, dense interest point generation, creation of unaligned
disparities, and transforming the cameras using the alignment transform
matrix. Then, a directory can be made in which one can copy the dense
interest point files, and run bundle adjustment with intrinsics
optimization jointly for all cameras. Hence, one should use a command as
follows (the example here is for 4 images)::
disp1=run_stereo12/run-unaligned-D.tif
disp2=run_stereo23/run-unaligned-D.tif
disp3=run_stereo34/run-unaligned-D.tif
bundle_adjust -t nadirpinhole --inline-adjustments \
--solve-intrinsics --camera-position-weight 0 \
img1.tif img2.tif img3.tif img4.tif \
run_align_12/run-img1.tsai run_align12/run-img2.tsai \
run_align_34/run-img3.tsai run_align34/run-img4.tsai \
--reference-terrain lidar.csv \
--disparity-list "$disp1 $disp2 $disp3" \
--robust-threshold 2 \
--max-disp-error 50 --max-num-reference-points 1000000 \
--overlap-limit 1 --parameter-tolerance 1e-12 \
--reference-terrain-weight 5 \
-o run_ba_intr_lidar/run
In case it is desired to omit the disparity between one pair of images,
for example, if they don't overlap, instead of the needed unaligned
disparity one can put the word ``none`` in this list.
Notice that since this joint adjustment was initialized from several
stereo pairs, the second camera picked above, for example, could have
been either the second camera from the first pair, or the first camera
from the second pair, so there was a choice to make. In :numref:`skysat`
an example is shown where a preliminary
bundle adjustment happens at the beginning, without using a reference
terrain, then those cameras are jointly aligned to the reference
terrain, and then one continues as done above, but this time one need
not have dealt with individual stereo pairs.
The option ``--overlap-limit`` can be used to control which images
should be tested for interest point matches, and a good value for it is
say 1 if one plans to use the interest points generated by stereo,
though a value of 2 may not hurt either. One may want to decrease
``--parameter-tolerance``, for example, to 1e-12, and set a value for
``--max-disp-error``, e.g., 50, to exclude unreasonable disparities (this
last number may be something one should experiment with, and the results
can be somewhat sensitive to it). A larger value of
``--reference-terrain-weight`` can improve the alignment of the cameras
to the reference terrain.
Also note the earlier comment about sharing and floating the intrinsics
individually.
.. _kaguya_ba:
Refining the intrinsics per sensor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Given a set of sensors, with each acquiring several images, we will optimize the
intrinsics per sensor. All images acquired with the same sensor will share the
same intrinsics, and none will be shared across sensors.
We will work with Kaguya TC linescan cameras and the CSM camera model
(:numref:`csm`). Pinhole cameras in .tsai format (:numref:`pinholemodels`) and
Frame cameras in CSM format (:numref:`csm_frame_def`) can be used as well.
See :numref:`floatingintrinsics` for an introduction on how optimizing intrinsics
works, and :numref:`kaguya_tc` for how to prepare and use Kaguya TC cameras.
See :numref:`ba_frame_linescan` for fine-level control per group and for how
to mix frame and linescan cameras.
.. _kaguya_watch:
Things to watch for
^^^^^^^^^^^^^^^^^^^
Optimizing the intrinsics can be tricky. One has to be careful to select a
non-small set of images that have a lot of overlap, similar illumination, and
an overall good baseline between enough images (:numref:`stereo_pairs`).
It is suggested to do a lot of inspections along the way. If things turn out to
work poorly, it is often hard to understand at what step the process failed.
Most of the time the fault lies with the data not satisfying the assumptions
being made.
The process will fail if, for example, the data is not well-aligned before
the refinement of intrinsics is started, if the illumination is so different that
interest point matches cannot be found, or if something changed about a sensor
and the same intrinsics don't work for all images acquired with that sensor.
The ``cam_test`` tool (:numref:`cam_test`) can be used to check if the distortion
model gets inverted correctly. The distortion model should also be expressive
enough to model the distortion in the images.
Image selection
^^^^^^^^^^^^^^^
We chose a set of 10 Kaguya stereo pairs with a lot of overlap (20 images in
total). The left image was acquired with the ``TC1`` sensor, and the right one
with ``TC2``. These sensors have different intrinsics.
Some Kaguya images have different widths. These should not be mixed together.
Of the images with narrower width, it was observed that images acquired with
"morning" illumination need different calibration than the rest. Hence,
there will be two groups of intrinsics for the narrow TC images.
Some images had very large difference in illumination (not for the same stereo
pair). Then, finding of matching interest points can fail. Kaguya images are
rather well-registered to start with, so the resulting small misalignment that
could not be corrected by bundle adjustment was not a problem in solving for
intrinsics, and ``pc_align`` (:numref:`pc_align`) was used later for individual
alignment. This is not preferable, in general. It was tricky however to find
many images with a lot of overlap, so this had to make do.
A modification of the work flow for the case of images with very different
illumination is in :numref:`kaguya_ba_illumination`.
.. _kaguya_ba_initial_ba:
Initial bundle adjustment with fixed intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Put the image and camera names in plain text files named ``images.txt`` and
``cameras.txt``. These must be in one-to-one correspondence, and with one image
or camera per line.
The order should be with TC1 images being before TC2. Later we will use the same
order when these are subdivided by sensor.
Initial bundle adjustment is done with the intrinsics fixed.
::
parallel_bundle_adjust \
--nodes-list nodes.txt \
--image-list images.txt \
--camera-list cameras.txt \
--num-iterations 50 \
--tri-weight 0.2 \
--tri-robust-threshold 0.2 \
--camera-position-weight 0 \
--auto-overlap-params 'dem.tif 15' \
--remove-outliers-params '75.0 3.0 20 20' \
--ip-per-tile 2000 \
--matches-per-tile 2000 \
--max-pairwise-matches 20000 \
-o ba/run
The option ``--auto-overlap-params`` is used with a prior DEM (such as gridded
and filled with ``point2dem`` at low resolution based on LOLA RDR data). This is
needed to estimate which image pairs overlap.
The option ``--remove-outliers-params`` is set so that only the worst outliers
(with reprojection error of 20 pixels or more) are removed. That is because
imperfect intrinsics may result in accurate interest points that have a
somewhat large reprojection error. We want to keep such features in the corners
to help refine the distortion parameters.
The option ``--ip-per-tile`` is set to a large value so that many interest
points are generated, and then the best ones are kept. This can be way too large
for big images. (Consider using instead ``--ip-per-image``.) The option
``--matches-per-tile`` tries to ensure matches are uniformly distributed
(:numref:`custom_ip`).
Normally 50 iterations should be enough. Two passes will happen. After each
pass outliers will be removed.
It is very strongly suggested to inspect the obtained clean match files (that
is, without outliers) with ``stereo_gui``
(:numref:`stereo_gui_pairwise_matches`), and reprojection errors in the final
``pointmap.csv`` file (:numref:`ba_out_files`), using ``stereo_gui`` as well
(:numref:`plot_csv`). Insufficient or poorly distributed clean interest point
matches will result in a poor solution.
The reprojection errors are plotted in :numref:`kaguya_intrinsics_opt_example`.
Running stereo
^^^^^^^^^^^^^^
We will use the optimized CSM cameras saved in the ``ba`` directory
(:numref:`csm_state`). For each stereo pair, run::
parallel_stereo \
--job-size-h 2500 \
--job-size-w 2500 \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
--nodes-list nodes.txt \
left.cub right.cub \
ba/run-left.adjusted_state.json \
ba/run-right.adjusted_state.json \
stereo_left_right/run
Then we will create a DEM at the resolution of the input images,
which in this case is 10 m/pixel. The local stereographic projection
will be used.
::
point2dem --tr 10 \
--errorimage \
--stereographic \
--proj-lon 93.7608 \
--proj-lat 3.6282 \
stereo_left_right/run-PC.tif
Normally it is suggested to rerun stereo with mapprojected images
(:numref:`mapproj-example`) to get higher quality results. For the current goal,
of optimizing the intrinsics, the produced terrain is good enough. See also
:numref:`nextsteps` for a discussion of various stereo algorithms.
Inspect the produced DEMs and intersection error files (:numref:`point2dem`).
The latter can be colorized (:numref:`colorize`). Use ``gdalinfo -stats``
(:numref:`gdal_tools`) to see the statistics of the intersection error. In this
case it turns out to be around 4 m, which, given the ground resolution of 10
m/pixel, is on the high side. The intersection errors are also higher at left
and right image edges, due to distortion. (For a frame sensor this error will
instead be larger in the corners.)
Evaluating agreement between the DEMs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Overlay the produced DEMs and check for any misalignment. This may happen
if there are insufficient interest points or if the unmodeled distortion
is large.
Create a blended average DEM from the produced DEMs using the
``dem_mosaic`` (:numref:`dem_mosaic`)::
dem_mosaic stereo*/run-DEM.tif -o mosaic_ba.tif
Alternatively, such a DEM can be created from LOLA RDR data, if dense enough,
as::
    point2dem \
      --csv-format 2:lon,3:lat,4:radius_km \
      --search-radius-factor 10 \
      --tr RESOLUTION --t_srs PROJ_STR \
      lola.csv
It is likely better, however, to ensure there is a lot of overlap between the
input images and use the stereo DEM mosaic rather than LOLA.
*The process will fail if the DEM that is used as a constraint is misaligned
with the cameras.* Alignment is discussed in :numref:`intrinsics_ground_truth`.
It is useful to subtract each DEM from the mosaic using ``geodiff``
(:numref:`geodiff`)::
geodiff mosaic_ba.tif stereo_left_right/run-DEM.tif \
-o stereo_left_right/run
These differences can be colorized with ``stereo_gui`` using the ``--colorbar``
option (:numref:`colorize`). The std dev of the obtained signed difference
can be used as a measure of discrepancy. These errors should go down after
refining the intrinsics.
Uniformly distributed interest points
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For the next step, refining the intrinsics, it is important to have
well-distributed interest points.
Normally, the sparse interest points produced with bundle adjustment so far can
be used. *For most precise work, dense and uniformly distributed interest points
produced from disparity are necessary* (:numref:`dense_ip`).
For example, if the input dataset consists of 6 overlapping stereo pairs, stereo
can be run between each left image and every other right image, producing 36
sets of dense interest points. See an example in :numref:`change3`.
The interest point file names must be changed to respect the *naming convention*
(:numref:`ba_match_files`), reflecting the names of the raw images, then passed
to ``bundle_adjust`` via the ``--match-files`` option.
One can also take the sparse interest points, and augment them with dense
interest points from stereo only for a select set of pairs. All these must
then use the same naming convention.
Refining the intrinsics
^^^^^^^^^^^^^^^^^^^^^^^
We will use the camera files produced by ``bundle_adjust`` before, with names as
``ba/run-*.adjusted_state.json``. These have the refined position and
orientation. We will re-optimize those together with the intrinsics parameters,
including distortion (which in ``bundle_adjust`` goes by the name
``other_intrinsics``).
The images and (adjusted) cameras for individual sensors should be put in
separate files, but in the same overall order as before, to be able to reuse
the match files. Then, the image lists will be passed to the ``--image-list``
option with comma as separator (no spaces), and the same for the camera lists.
The bundle adjustment command becomes::
bundle_adjust --solve-intrinsics \
--inline-adjustments \
--intrinsics-to-float \
"optical_center focal_length other_intrinsics" \
--image-list tc1_images.txt,tc2_images.txt \
--camera-list tc1_cameras.txt,tc2_cameras.txt \
--num-iterations 10 \
--clean-match-files-prefix ba/run \
--heights-from-dem mosaic_ba.tif \
--heights-from-dem-uncertainty 10.0 \
--heights-from-dem-robust-threshold 0.1 \
--remove-outliers-params '75.0 3.0 20 20' \
--max-pairwise-matches 20000 \
-o ba_other_intrinsics/run
See :numref:`heights_from_dem` for the option ``--heights-from-dem``, and
:numref:`bundle_adjust` for the documentation of all options above.
If only a single sensor exists, the option ``--intrinsics-to-share`` should be
set.
If large errors are still left at the image periphery, adjust
``--heights-from-dem-uncertainty``. If a small value of this is used with an
inaccurate prior DEM, it will make the results worse. Also consider adding more
images overlapping with the current ones.
Some lens distortion parameters can be kept fixed (option
``--fixed-distortion-indices``).
.. figure:: images/kaguya_intrinsics_opt_example.png
:name: kaguya_intrinsics_opt_example
:alt: kaguya_intrinsics_opt_example
The reprojection errors (``pointmap.csv``) before (top) and after (bottom)
refinement of distortion. Some outliers are still visible but are harmless.
Dense and uniformly distributed interest points (:numref:`dense_ip`) are
strongly suggested, but not used here.
It can be seen that many red vertical patterns are now much attenuated (these
correspond to individual image edges). On the right some systematic errors
are seen (due to the search range in stereo chosen here being too small and
some ridges having been missed). Those do not affect the optimization. Using
mapprojected images would have helped with this. The ultimate check will be
the comparison with LOLA RDR (:numref:`kaguya_intrinsics_alignment_diff`).
Plotted with ``stereo_gui`` (:numref:`plot_csv`).
Recreation of the stereo DEMs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The new cameras can be used to redo stereo and the DEMs. It is suggested to
use the option ``--prev-run-prefix`` in ``parallel_stereo`` to
redo only the triangulation operation, which greatly speeds up processing
(see :numref:`bathy_reuse_run` and :numref:`mapproj_reuse`).
As before, it is suggested to examine the intersection error and the difference
between each produced DEM and the corresponding combined averaged DEM. These
errors drop by a factor of about 2 and 1.5 respectively.
Comparing to an external ground truth
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We solved for intrinsics by constraining against the averaged mosaicked DEM of
the stereo pairs produced with initial intrinsics. This works reasonably well if
the error due to distortion is somewhat small and the stereo pairs overlap
enough that this error gets averaged out in the mosaic.
Ideally, a known accurate external DEM should be used. For example, one could
create DEMs using LRO NAC data. Note that many such DEMs would need to be
combined, because LRO NAC has a much smaller footprint.
Should such a DEM exist, before using it instead of the averaged mosaic, the
mosaic (or individual stereo DEMs) should be first aligned to the external DEM.
Then, the same alignment transform should be applied to the cameras
(:numref:`ba_pc_align`). Then the intrinsics optimization can happen as before.
We use the sparse `LOLA RDR
`_ dataset for
final validation. This works well enough because the ground footprint of Kaguya TC is
rather large.
Each stereo DEM, before and after intrinsics refinement, is individually aligned to
LOLA, and the signed difference to LOLA is found.
::
pc_align --max-displacement 50 \
--save-inv-transformed-reference-points \
dem.tif lola.csv \
-o run_align/run
point2dem --tr 10 \
--errorimage \
--stereographic \
--proj-lon 93.7608 \
--proj-lat 3.6282 \
run_align/run-trans_reference.tif
geodiff --csv-format 2:lon,3:lat,4:radius_km \
run_align/run-trans_reference-DEM.tif lola.csv \
-o run_align/run
The ``pc_align`` tool is quite sensitive to the value of ``--max-displacement``
(:numref:`pc_align_max_displacement`). Here it was chosen to be somewhat larger
than the vertical difference between the two datasets to align. That is because
KaguyaTC is already reasonably well-aligned.
.. figure:: images/kaguya_intrinsics_diff.png
:name: kaguya_intrinsics_alignment_diff
:alt: kaguya_intrinsics_alignment_diff
The signed difference between aligned stereo DEMs and LOLA RDR before (top)
and after (bottom) refinement of distortion. (Blue = -20 meters, red = 20
meters.) It can be seen that the warping of the DEMs due to distortion is much
reduced. Plotted with ``stereo_gui`` (:numref:`plot_csv`).
.. _kaguya_ba_illumination:
Handling images with very different illumination
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If each stereo pair has consistent illumination, but the illumination is very
different between pairs, then the above approach may not work well as tie points
could be hard to find. It is suggested to do the initial bundle adjustment per
each stereo pair, followed by alignment of the individual produced DEMs to a
reference dataset.
Apply the alignment transform to the pairwise bundle-adjusted cameras as well
(:numref:`ba_pc_align`), and use these cameras for the refinement of intrinsics,
with the ground constraint being the mosaic of these aligned DEMs.
It is suggested to examine how each aligned DEM differs from the
reference, and the same for their mosaic. The hope is that the mosaicking will
average out the errors in the individual DEMs.
If a lot of such stereo pairs are present, for the purpose of refinement of
intrinsics it is suggested to pick just a handful of them, corresponding to the
area where the mosaicked DEM differs least from the reference, so where the
distortion artifacts are most likely to have been averaged well.
.. _ba_frame_linescan:
Mixing frame and linescan cameras
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
So far we discussed refining the intrinsics for pinhole (frame) cameras, such as
in :numref:`heights_from_dem`, and for linescan cameras, such as in
:numref:`kaguya_ba`.
Here we will consider the situation when we have both. It is assumed that the
images acquired with these sensors are close in time and have similar
illumination. There should be a solid amount of image overlap, especially in the
corners of the images whose distortion should be optimized.
It will be illustrated how the presumably more accurate linescan sensor images
can be used to refine the intrinsics of the frame sensor.
Preparing the inputs
^^^^^^^^^^^^^^^^^^^^
The frame cameras can be in the black-box RPC format (:numref:`rpc`), or any
other format supported by ASP. The cameras can be converted to the CSM format
using ``cam_gen`` (:numref:`cam_gen_frame`). This will find the best-fit
intrinsics, including the lens distortion.
It is important to know at least the focal length of the frame cameras somewhat
accurately. This can be inferred based on satellite elevation and ground
footprint.
Once the first frame camera is converted to CSM, the rest of them that are
supposed to be for the same sensor model can borrow the just-solved intrinsic
parameters using the option ``--sample-file prev_file.json`` (the ``cam_gen``
manual has the full invocation).
The linescan cameras can be converted to CSM format using ``cam_gen`` as well
(:numref:`cam_gen_linescan`). This does not find a best-fit model, but rather
reads the linescan sensor poses and intrinsics from the input file.
We will assume in this basic example that we have two frame camera images
sharing intrinsics, named ``frame1.tif`` and ``frame2.tif``, and two linescan
camera images, for which we will not enforce that the intrinsics are shared. They
can even be from different vendors. The linescan intrinsics will be kept fixed.
Assume these files are named ``linescan1.tif`` and ``linescan2.tif``. The camera
names will have the same convention, but ending in ``.json``.
Initial bundle adjustment
^^^^^^^^^^^^^^^^^^^^^^^^^
The same approach as in :numref:`kaguya_ba_initial_ba` can be used.
A DEM may help figure out which image pairs overlap, but is not
strictly necessary.
Ensure consistent order of the images and cameras, both here and in the next
steps. This will guarantee that all generated match files will be used. The
order here will be ``frame1``, ``frame2``, ``linescan1``, ``linescan2``.
It is very strongly suggested to examine the stereo convergence angles
(:numref:`ba_conv_angle`). At least some of them should be at least 10-15
degrees, to ensure a robust solution.
Also examine the pairwise matches in ``stereo_gui``
(:numref:`stereo_gui_pairwise_matches`), the final residuals per camera
(:numref:`ba_errors_per_camera`), and per triangulated point
(:numref:`ba_err_per_point`). The latter can be visualized in ``stereo_gui``
(:numref:`plot_csv`). The goal is to ensure well-distributed features,
and that the errors are pushed down uniformly.
Dense interest points produced from stereo are strongly suggested
(:numref:`dense_ip`). An example using these is in :numref:`change3`.
Evaluation of terrain models
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
As in :numref:`kaguya_ba`, it is suggested to create several stereo DEMs after
the initial bundle adjustment. For example, make one DEM for the frame camera
pair, and a second for the linescan one. Use mapprojected images, the
``asp_mgm`` algorithm (:numref:`nextsteps`), and a local stereographic
projection for the produced DEMs.
One should examine the triangulation error for each DEM
(:numref:`triangulation_error`), and the difference between them with
``geodiff`` (:numref:`geodiff`). Strong systematic errors for the frame camera
data will then motivate the next steps.
Refinement of the frame camera intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We will follow closely the recipe in :numref:`kaguya_ba`. It is suggested to use
for the refinement step the linescan DEM as a constraint (option
``--heights-from-dem``). If a different DEM is employed, the produced bundle-adjusted
cameras and DEMs should be aligned to it first (:numref:`ba_pc_align`).
As for :numref:`kaguya_ba`, we need to create several text files, with each
having the names of the images whose intrinsics are shared, and the same for the
cameras.
If not sure that the linescan cameras have the same intrinsics, they can be kept
in different files. We will keep those intrinsics fixed in either case.
Assume the previous bundle adjustment was done with the output prefix
``ba/run``. The files for the next step are created as follows. For the
cameras::
ls ba/run-frame1.adjusted_state.json \
ba/run-frame2.adjusted_state.json > frame_cameras.txt
ls ba/run-linescan1.adjusted_state.json > linescan1_cameras.txt
ls ba/run-linescan2.adjusted_state.json > linescan2_cameras.txt
and similarly the images. Hence, we have 3 groups of sensors. These
files will be passed to ``bundle_adjust`` as follows::
--image-list frame_images.txt,linescan1_images.txt,linescan2_images.txt \
--camera-list frame_cameras.txt,linescan1_cameras.txt,linescan2_cameras.txt
Use a comma as separator, and no spaces.
We will float the intrinsics for the frame cameras, and keep the linescan intrinsics
(but not poses) fixed. This is accomplished with the option::
--intrinsics-to-float '1:focal_length,optical_center,other_intrinsics
2:none 3:none'
Optimizing the optical center may not be necessary, as this intrinsic parameter
may correlate with the position of the cameras, and these are not easy to
separate. Optimizing this may produce an implausible optical center.
Dense matches from disparity are strongly recommended (:numref:`dense_ip`).
Some lens distortion parameters can be kept fixed (option
``--fixed-distortion-indices``).
.. _ba_frame_linescan_results:
Post-refinement evaluation
^^^^^^^^^^^^^^^^^^^^^^^^^^
New DEMs and intersection error maps can be created. The previous stereo runs
can be reused with the option ``--prev-run-prefix`` in ``parallel_stereo`` (:numref:`mapproj_reuse`).
.. figure:: images/frame_linescan_dem_diff.png
:name: frame_linescan_dem_diff
:alt: frame_linescan_dem_diff
The signed difference between the frame and linescan DEMs before intrinsics
refinement (left) and after (right).
.. figure:: images/frame_linescan_intersection_error.png
:name: frame_linescan_intersection_error
:alt: frame_linescan_intersection_error
The triangulation error for the frame cameras before refinement of intrinsics
(left) and after (right). It can be seen in both figures that systematic
differences are greatly reduced.
.. _custom_ip:
Custom approaches to interest points
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. _sparse_ip:
Sparse and roughly uniformly distributed interest points
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To attempt to create roughly uniformly distributed *sparse* interest points
during bundle adjustment, use options along the lines ``--ip-per-tile 1000
--matches-per-tile 500 --max-pairwise-matches 10000``.
If the images have very different perspectives, it is suggested to create the
interest points based on mapprojected images (:numref:`mapip`).
Note that if the images are big, this will result in a very large number of
potential matches, because a tile has the size of 1024 pixels. (See
:numref:`ba_options` for the reference documentation for these options.)
To produce sparse interest point matches that are accurate to *subpixel level*,
use ``--ip-detect-method 1``.
.. _dense_ip:
Dense and uniformly distributed interest points
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Dense and uniformly distributed interest points can be created during stereo
(:numref:`tutorial`). If having many images, that will mean many combinations of
stereo pairs. A representative set of stereo pairs between all images is usually
sufficient.
The resulting interest points will be between the *original, unprojected and
unaligned images*. This is true even when stereo itself is done with
mapprojected images.
For each stereo invocation, add options along the lines of::
--num-matches-from-disparity 10000
or::
--num-matches-from-disp-triplets 10000
in order to create such a match file.
The latter option will ensure that, when there are more than two images, a dense
subset of features within area of overlap will have corresponding matches in
more than two images, with a single triangulated point on the ground for each
such matching feature set. If having many such stereo pairs, some triangulated
points will be represented with matches in all images.
This can be quite important for bundle adjustment. This number of features for
each triangulated point is the last field in the ``pointmap.csv`` report files
(:numref:`ba_err_per_point`).
In the latest ASP (:numref:`release`), these options are equivalent.
The produced match file name is named along the lines of::
run/run-disp-left__right.match
where ``left.tif`` and ``right.tif`` are the input images. If these images are
mapprojected, the latest ASP (post version 3.4.0) will instead adjust the match
file name to reflect the original, unprojected image names, as the matches are
between those images.
In either case, the produced match files must be copied from individual stereo
runs to the same directory, and *use the standard naming convention* for the
original image names (:numref:`ba_match_files`). The match files must be passed
to ``bundle_adjust`` via the ``--match-files-prefix`` option. In this example,
the prefix would be ``run/run-disp``.
Invoke ``bundle_adjust`` with a value of ``--max-pairwise-matches`` that is *at least
twice* the number of matches created here to ensure they are all kept.
A detailed example of using dense matches for bundle adjustment is in
:numref:`change3`.
These options are formally described in :numref:`triangulation_options`.
Interest points from mapprojected images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Interest point matches can be found between mapprojected images first, and those
can be unprojected and used in bundle adjustment. This can produce many more
interest points when the difference of perspective or scale between images is
large. See :numref:`mapip`.
.. _limit_ip:
Limit extent of interest point matches
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To limit the triangulated points produced from interest points to a certain area
during bundle adjustment, two approaches are supported. One is the option
``--proj-win``, coupled with ``--proj-str``.
The other is using the ``--weight-image`` option (also supported by the jitter
solver, :numref:`jitter_solve`). In locations where a given georeferenced weight
image has non-positive or nodata values, triangulated points will be ignored.
Otherwise each pixel reprojection error will be multiplied by the weight closest
geographically to the triangulated point. The effect is to work harder on the
areas where the weight is higher.
Such a weight image can be created from a regular georeferenced image with
positive pixel values as follows. Open it in ``stereo_gui``, and draw on top of
it one or more polygons, each being traversed in a counterclockwise direction,
and with any holes oriented clockwise (:numref:`plot_poly`). Save this shape as
``poly.shp``, and then run::
cp georeferenced_image.tif aux_image.tif
gdal_rasterize -i -burn -32768 poly.shp aux_image.tif
This will keep the data inside the polygons and set the data outside to this value.
The value to burn should be negative and smaller than any valid pixel value in
the image. To keep the data outside the polygons, omit the ``-i`` option.
Then, create a mask of valid values using ``image_calc`` (:numref:`image_calc`),
as follows::
image_calc -c "max(sign(var_0), 0)" \
--output-nodata-value var_0 \
aux_image.tif -o weight.tif
Examine the obtained image in ``stereo_gui`` and click on various pixels to
inspect the values.
If the image does not have positive values to start with, those values
can be first shifted up with ``image_calc``.
Various such weight images can be merged with ``dem_mosaic``
(:numref:`dem_mosaic`) or the values manipulated with ``image_calc``.
.. _ba_rpc_distortion:
RPC lens distortion
~~~~~~~~~~~~~~~~~~~
ASP provides a lens distortion model for Pinhole cameras
(:numref:`pinholemodels`) that uses Rational Polynomial Coefficients (RPC) of
arbitrary degree (:numref:`rpc_distortion`). This can help fit lens distortion
where other simpler models cannot.
The tool ``convert_pinhole_model`` (:numref:`convert_pinhole_model`) can create
camera models with RPC distortion.
It is very important for the input distortion coefficients to be manually
modified so they are on the order of 1e-7 or more, as otherwise they will be
hard to optimize and may stay small. In the latest builds this is done
automatically by ``bundle_adjust`` (option ``--min-distortion``).
See :numref:`intrinsics_ground_truth` and :numref:`kaguya_ba` for examples of
how to optimize the lens distortion. An example specifically using RPC is
illustrated in :numref:`kh7_fig`. It is suggested to use dense interest point
matches from disparity (:numref:`dense_ip`).
.. figure:: images/biradial_err_rpc.png
Triangulation error (:numref:`triangulation_error`) examples without modeling
distortion (top), and after optimizing the lens distortion with RPC of degree
6 (bottom). The dataset for this example was acquired by a drone and had "biradial"
distortion.
.. _jigsaw:
Bundle adjustment using ISIS
----------------------------
In what follows we describe how to do bundle adjustment using ISIS's
toolchain. It also serves to describe bundle adjustment in more detail,
which is applicable to other bundle adjustment tools as well, including
Stereo Pipeline's own tool.
ASP's ``bundle_adjust`` program can read and write the ISIS control network
format, hence the ASP and ISIS tools can be compared or used together
(:numref:`control_network`).
In bundle adjustment, the position and orientation of each camera
station are determined jointly with the 3D position of a set of image
tie-points points chosen in the overlapping regions between images. Tie
points, as suggested by the name, tie multiple camera images together.
Their physical manifestation would be a rock or small crater that can be
observed across more than one image.
Tie-points are automatically extracted using ISIS's ``autoseed`` and
``pointreg`` (alternatively one could use a number of outside methods
such as the famous SURF :cite:`surf08`). Creating a
collection of tie points, called a *control network*, is a three step
process. First, a general geographic layout of the points must be
decided upon. This is traditionally just a grid layout that has some
spacing that allows for about 20-30 measurements to be made per image.
This shows up in slightly different projected locations in each image
due to their slight misalignments. The second step is to have an
automatic registration algorithm try to find the same feature in all
images using the prior grid as a starting location. The third step is to
manually verify all measurements visually, checking to insure that each
measurement is looking at the same feature.
.. figure:: images/ba_feature_observation.png
:name: ba_feature
:alt: A feature observation
A feature observation in bundle adjustment, from
:cite:`moore09`
Bundle adjustment in ISIS is performed with the ``jigsaw`` executable.
It generally follows the method described
in :cite:`triggs00` and determines the best camera
parameters that minimize the projection error given by
.. math::
{\bf \epsilon} = \sum_k\sum_j(I_k-I(C_j, X_k))^2
where :math:`I_k` are the tie points on the image plane, :math:`C_j` are the
camera parameters, and :math:`X_k` are the 3D positions associated with features
:math:`I_k`. :math:`I(C_j, X_k)` is an image formation model (i.e. forward
projection) for a given camera and 3D point. To recap, it projects the 3D point,
:math:`X_k`, into the camera with parameters :math:`C_j`.
This produces a predicted image location for the 3D point that is compared
against the observed location, :math:`I_k`. It then reduces this error with the
Levenberg-Marquardt algorithm (LMA). Speed is improved by using sparse methods
as described in :cite:`hartley04`, :cite:`konolige:sparsesparse`, and
:cite:`cholmod`.
Even though the arithmetic for bundle adjustment sounds clever, there
are faults with the base implementation. Imagine a case where all
cameras and 3D points were collapsed into a single point. If you
evaluate the above cost function, you'll find that the error is indeed
zero. This is not the correct solution if the images were taken from
orbit. Another example is if a translation was applied equally to all 3D
points and camera locations. This again would not affect the cost
function. This fault comes from bundle adjustment's inability to control
the scale and translation of the solution. It will correct the geometric
shape of the problem, yet it cannot guarantee that the solution will
have correct scale and translation.
ISIS attempts to fix this problem by adding two additional cost
functions to bundle adjustment. First of which is
.. math::
{\bf \epsilon} = \sum_j(C_j^{initial}-C_j)^2.
This constrains camera parameters to stay relatively close to their initial
values. Second, a small handful of 3D ground control points (:numref:`bagcp`)
can be chosen by hand and added to the error metric as
.. math::
{\bf \epsilon} = \sum_k(X_k^{gcp}-X_k)^2
to constrain these points to known locations in the planetary coordinate frame.
A physical example of a ground control point could be the location of a lander
that has a well known location. GCPs could also be hand-picked points against a
highly regarded and prior existing map such as the THEMIS Global Mosaic or the
LRO-WAC Global Mosaic.
Like other iterative optimization methods, there are several conditions that
will cause bundle adjustment to terminate. When updates to parameters become
insignificantly small or when the error, :math:`{\bf \epsilon}`, becomes
insignificantly small, then the algorithm has converged and the result is most
likely as good as it will get. However, the algorithm will also terminate when
the number of iterations becomes too large in which case bundle adjustment may
or may not have finished refining the parameters of the cameras.
.. _ba_example:
Tutorial: Processing Mars Orbital Camera images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This tutorial for ISIS's bundle adjustment tools is taken from
:cite:`lunokhod:controlnetwork` and
:cite:`lunokhod:gcp`. These tools are not a product of NASA
nor the authors of Stereo Pipeline. They were created by USGS and their
documentation is available at :cite:`isis:documentation`.
What follows is an example of bundle adjustment using two MOC images of Hrad
Vallis. We use images E02/01461 and M01/00115, the same as used in
:numref:`moc_tutorial`. These images are available from NASA's PDS (the ISIS
``mocproc`` program will operate on either the IMQ or IMG format files, we use
the ``.imq`` below in the example).
Ensure that ISIS and its supporting data is installed, per :numref:`planetary_images`,
and that ``ISISROOT`` and ``ISIS3DATA`` are set. The string ``ISIS>`` is not
part of the shell commands below, it is just suggestive of the fact that we operate
in an ISIS environment.
Fetch the MOC images and convert them to ISIS cubes.
::
ISIS> mocproc from=E0201461.imq to=E0201461.cub mapping=no
ISIS> mocproc from=M0100115.imq to=M0100115.cub mapping=no
Note that the resulting images are not map-projected. That is because bundle
adjustment requires the ability to project arbitrary 3D points into the camera
frame. The process of map-projecting an image dissociates the camera model from
the image. Map-projecting can be perceived as the generation of a new infinitely
large camera sensor that may be parallel to the surface, a conic shape, or
something more complex. That makes it extremely hard to project a random point
into the camera's original model. The math would follow the transformation from
projection into the camera frame, then projected back down to surface that ISIS
uses, then finally up into the infinitely large sensor. ``Jigsaw`` does not
support this and thus does not operate on map-projected images.
ASP's ``bundle_adjust`` program, however, can create and match features
on mapprojected images, and then project those into the original images
(:numref:`mapip`).
Before we can dive into creating our tie-point measurements we must
finish prepping these images. The following commands will add a vector
layer to the cube file that describes its outline on the globe. It will
also create a data file that describes the overlapping sections between
files.
::
ISIS> footprintinit from=E0201461.cub
ISIS> footprintinit from=M0100115.cub
ISIS> ls *.cub > cube.lis
ISIS> findimageoverlaps from=cube.lis overlaplist=overlap.lis
At this point, we are ready to start generating our measurements. This
is a three step process that requires defining a geographic pattern for
the layout of the points on the ground, an automatic registration pass,
and finally a manual clean up of all measurements. Creating the ground
pattern of measurements is performed with ``autoseed``. It requires a
settings file that defines the spacing in meters between measurements.
For this example, write the following text into a *autoseed.def* file.
::
Group = PolygonSeederAlgorithm
Name = Grid
MinimumThickness = 0.01
MinimumArea = 1
XSpacing = 1000
YSpacing = 2000
End_Group
The minimum thickness defines the minimum ratio between the sides of the
region that can have points applied to it. A choice of 1 would define a
square and anything less defines thinner and thinner rectangles. The
minimum area argument defines the minimum square meters that must be in
an overlap region. The last two are the spacing in meters between
control points. Those values were specifically chosen for this pair so
that about 30 measurements would be produced from ``autoseed``. Having
more control points just makes for more work later on in this process.
Run ``autoseed`` as follows.
::
ISIS> autoseed fromlist=cube.lis overlaplist=overlap.lis \
onet=control.net deffile=autoseed.def networkid=moc \
pointid=vallis???? description=hrad_vallis
Note the option ``pointid=vallis????``. It must be used verbatim. This command
will create ids that will look like ``vallis0001``, ``vallis0002``, potentially
up to ``vallis9999``. The number of question marks will control how many
measurements are created. See `autoseed
`_'s
manual page for more details.
Inspect this control network with
`qnet `_. Type "qnet" in a terminal, with
no options. A couple of windows will pop up. From the *File* menu of the
``qnet`` window, click on *Open control network and cube list*. Open the
file ``cube.lis``. From the same dialog, open ``control.net``.
Click on ``vallis0001`` in the Control Network Navigator window, then click on
``view cubes``. This will show the illustration below.
.. figure:: images/qnet/Qnet_AfterAutoseed_400px.png
:name: after_autoseed
:alt: Autoseed visualization
A visualization of the features laid out by ``autoseed`` in ``qnet``.
Note that the marks do not cover the same features between images.
This is due to the poor initial SPICE (camera pose) data for MOC images.
The next step is to perform auto registration of these features between the two
images using `pointreg
`_.
This program also requires a settings file that describes how to do the
automatic search. Copy the text box below into a *autoRegTemplate.def* file.
::
Object = AutoRegistration
Group = Algorithm
Name = MaximumCorrelation
Tolerance = 0.7
EndGroup
Group = PatternChip
Samples = 21
Lines = 21
MinimumZScore = 1.5
ValidPercent = 80
EndGroup
Group = SearchChip
Samples = 75
Lines = 1000
EndGroup
EndObject
The search chip defines the search range for which ``pointreg`` will
look for matching images. The pattern chip is simply the kernel size of
the matching template. The search range is specific for this image pair.
The control network result after ``autoseed`` had a large vertical
offset on the order of 500 pixels. The large misalignment dictated the
need for the large search in the lines direction. Use ``qnet`` to get an
idea for what the pixel shifts look like in your stereo pair to help you
decide on a search range. In this example, only one measurement failed
to match automatically. Here are the arguments to use in this example of
``pointreg``.
::
ISIS> pointreg fromlist=cube.lis cnet=control.net \
onet=control_pointreg.net deffile=autoRegTemplate.def
The third step is to verify the measurements in ``qnet``, and, if necessary,
apply manual corrections. Type ``qnet`` in the terminal and then open
*cube.lis*, followed by *control_pointreg.net*. From the Control Network
Navigator window, click, as before, on the first point, *vallis0001*. That opens
a third window called the Qnet Tool. That window will allow you to play a flip
animation that shows alignment of the feature between the two images. Correcting
a measurement is performed by left clicking in the right image, then clicking
*Save Measure*, and finally finishing by clicking *Save Point*.
In this tutorial, measurement *0025* ended up being incorrect. Your number may
vary if you used different settings than the above or if MOC SPICE (camera pose)
data has improved since this writing. When finished, go back to the main Qnet
window. Save the final control network as *control_qnet.net* by clicking on
*File*, and then *Save As*.
.. figure:: images/qnet/Qnet_AfterQnetManual_400px.png
:name: after_manual
:alt: After Qnet
A visualization of the features after manual editing in ``qnet``.
Note that the marks now appear in the same location between images.
Once the control network is finished, it is finally time to start bundle
adjustment. Here's how ``jigsaw`` is called::
ISIS> jigsaw \
fromlist = cube.lis \
update = yes \
twist = no \
radius = yes \
point_radius_sigma = 1000 \
cnet = control_qnet.net \
onet = control_ba.net
The update option defines that we would like to update the camera pointing, if
our bundle adjustment converges. The ``twist = no`` option says to not solve for
the camera rotation about the camera bore. That property is usually very well
known as it is critical for integrating an image with a line-scan camera. The
``radius = yes`` setting means that the radius of the 3D features can be solved
for. Using ``radius = no`` will force the points to use height values from
another source, usually LOLA or MOLA. The ``point_radius_sigma`` option defines
the uncertainty of the radius of the 3D points, in units of meter.
The above command will print out diagnostic information from
every iteration of the optimization algorithm. The most important
feature to look at is the *sigma0* value. It represents the mean of
pixel errors in the control network. In our run, the initial error was
1065 pixels and the final solution had an error of 1.1 pixels.
Producing a DEM using the newly created camera corrections is the same
as covered in the Tutorial. When using ``jigsaw``, it modifies
a copy of the SPICE data that is stored internally to the cube file.
Thus, when we want to create a DEM using the correct camera geometry, no extra
information needs to be given to ``parallel_stereo`` since it is already
contained in the camera files.
More information is in the `jigsaw documentation
`_.
See :numref:`control_network` for how to use the resulting control network in
``bundle_adjust``.
In the event a mistake has been made, ``spiceinit`` will overwrite the SPICE
data inside a cube file and provide the original uncorrected camera pointing.
It can be invoked on each cub file as::
ISIS> spiceinit from=image.cub
In either case, then one can run stereo::
ISIS> parallel_stereo \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
E0201461.cub M0100115.cub \
stereo/run
See :numref:`nextsteps` for how to improve the quality of stereo
correlation results (at the expense of speed), how to create a DEM,
etc.
.. _jigsaw_cnet_details:
Using the ISIS cnet format in ASP
---------------------------------
ASP's ``bundle_adjust`` program can read and write control networks in the ISIS
format (and they are read by ``jitter_solve`` as well). A basic overview of how
this works is in :numref:`jigsaw_cnet`. This section provides more details.
A priori surface points will be read and written back (they may change only in
special cases, see below). Adjusted surface points will be read, optimized, then
written back.
For constrained surface points, the constraint will be relative to the a priori
surface points. These will be used with sigmas from adjusted surface points, as
the a priori sigmas are on occasion negative, and likely the adjusted sigmas are
more up-to-date.
Constrained surface points are treated as GCP in ``bundle_adjust``
(:numref:`bagcp`), so smaller sigmas result in more weight given to the
discrepancy between surface points being optimized and a priori surface points.
Fixed surface points will be set to the a priori values and kept fixed during
the optimization.
Any input points that are flagged as ignored or rejected will be treated as
outliers and will not be used in the optimization. They will be saved the same
way. Additional points may be tagged as outliers during optimization. These will
be flagged as ignored and rejected on output.
Partially constrained points will be treated as free points during the
optimization, but the actual flags will be preserved on saving.
Control measure sigmas are read and written back. They will be used in the
optimization. If not set in the input file, they will be assigned the value 1.0
by ``bundle_adjust``, and it is this value that will be saved.
Pixel measurements will have 0.5 subtracted on input, and then added back on
output.
If ``bundle_adjust`` is invoked with GCP files specified separately in ASP's GCP
format, the GCP will be appended to the ISIS control network and then saved
together with it. These points will be treated as constrained (with provided
sigmas and a priori surface values), unless the sigmas are set to the precise
value of 1e-10, or when the flag ``--fix-gcp-xyz`` is used, in which case they
will be treated as fixed both during optimization and when saving to the ISIS
control network file. (For a small value of sigma, GCP are practically fixed in
either case.)
Using the ``bundle_adjust`` options ``--initial-transform`` and
``--input-adjustments-prefix`` will force the recomputation of a priori points
(using triangulation), as these options can drastically change the cameras.
A priori points will change if ``--heights-from-dem`` is used
(:numref:`heights_from_dem`). The sigmas will be set to what is provided via the
``--heights-from-dem-uncertainty`` option.
If exporting match files from an ISIS control network (option
``--output-cnet-type match-files``), constrained and fixed points won't be
saved, as ASP uses GCP files for that. Saved match files will have the rest of
the matches, and clean match files will have only the inliers. Any sigma values
and surface points from the control network will not be saved.
================================================
FILE: docs/conf.py
================================================
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

import re
from pathlib import Path

# -- Project information -----------------------------------------------------

project = 'Ames Stereo Pipeline'
copyright = (
    '2009-2025, United States Government as represented by the '
    'Administrator of the National Aeronautics and Space Administration'
)
author = 'ASP Authors'


def _extract_asp_version(cmake_path):
    """Return the value set via set(PACKAGE_VERSION ...) in the given
    CMakeLists.txt file.

    The match is anchored at the start of the line (allowing only leading
    whitespace), so commented-out version lines are ignored. The value may
    optionally be surrounded by double quotes, and trailing whitespace after
    the closing parenthesis is tolerated. Raises Exception if no version
    line is found.
    """
    version_text = Path(cmake_path).read_text()
    for line in version_text.splitlines():
        # Match a line of the form: set(PACKAGE_VERSION <value>), with an
        # optionally quoted value.
        m = re.match(r'^\s*set\s*\(\s*PACKAGE_VERSION\s+"?(.*?)"?\s*\)\s*$',
                     line)
        if m:
            return m.group(1)
    raise Exception("Could not find the version in CMakeLists.txt")


# Extract the ASP version from ../src/CMakeLists.txt
version_string = _extract_asp_version("../src/CMakeLists.txt")

# The short X.Y version. Use dashes rather than underscores in the
# displayed version (e.g., "3.5.0-alpha" rather than "3.5.0_alpha").
version = version_string.replace('_', '-')
# The full version, including alpha/beta/rc tags
release = version

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinxcontrib.bibtex',
]

# Bibliography files used by sphinxcontrib.bibtex.
bibtex_bibfiles = ['bibliography.bib', 'papersusingasp.bib']

# Number figures, tables, and code-blocks so they can be cross-referenced
# with :numref:.
numfig = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'AmesStereoPipelinedoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'asp_book.tex', 'Ames Stereo Pipeline Documentation',
     r'Ross A. Beyer, Oleg Alexandrov, Scott McMichael, \\ and the ASP contributors',
     'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'amesstereopipeline', 'Ames Stereo Pipeline Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'AmesStereoPipeline', 'Ames Stereo Pipeline Documentation',
     author, 'AmesStereoPipeline', 'One line description of project.',
     'Miscellaneous'),
]

# -- Extension configuration -------------------------------------------------
================================================
FILE: docs/contributing.rst
================================================
.. include:: ../CONTRIBUTING.rst
================================================
FILE: docs/correlation.rst
================================================
.. _correlation:
Advanced stereo topics
======================
In this chapter we will dive much deeper into understanding the core
algorithms in the Stereo Pipeline. We start with an overview of the five
stages of stereo reconstruction. Then we move into an in-depth
discussion and exposition of the various correlation algorithms.
The goal of this chapter is to build an intuition for the stereo
correlation process. This will help users to identify unusual results in
their DEMs and hopefully eliminate them by tuning various parameters in
the ``stereo.default`` file (:numref:`stereodefault`). For scientists and
engineers who are using DEMs produced with the Stereo Pipeline, this
chapter may help to answer the question, "What is the Stereo Pipeline
doing to the raw data to produce this DEM?"
A related question that is commonly asked is, "How accurate is a DEM
produced by the Stereo Pipeline?" This chapter does not yet address
matters of accuracy and error, however we have several efforts underway
to quantify the accuracy of Stereo Pipeline-derived DEMs, and will be
publishing more information about that shortly. Stay tuned.
The entire stereo correlation process, from raw input images to a point
cloud or DEM, can be viewed as a multistage pipeline as depicted in
:numref:`asp`, and detailed in the following sections.
.. figure:: images/asp.png
:alt: Flow of data through the Stereo Pipeline.
:name: asp
Flow of data through the Stereo Pipeline.
Pre-processing
--------------
The first optional (but recommended) step in the process is least
squares Bundle Adjustment, which is described in detail in
:numref:`bundle_adjustment`.
Next, the left and right images are roughly aligned using one of
the four methods: (1) a homography transform of the right image
based on automated tie-point measurements (interest point matches),
(2) an affine epipolar
transform of both the left and right images (also based on tie-point
measurements as earlier), the effect of which is equivalent to
rotating the original cameras which took the pictures, (3) a 3D
rotation that achieves epipolar rectification (only implemented for
Pinhole sessions for missions like MER or K10, see
:numref:`mer-example` and :numref:`k10example`) or (4)
map-projection of both the left and right images using the ISIS
``cam2map`` command or through the more general ``mapproject`` tool
that works for any cameras supported by ASP (see :numref:`mapproj-example`
for the latter). The first three options can be applied automatically
by the Stereo Pipeline when the ``alignment-method`` variable in
the ``stereo.default`` file is set to ``affineepipolar``, ``homography``,
or ``epipolar``, respectively.
The latter option, running ``cam2map``, ``cam2map4stereo.py``, or
``mapproject`` must be carried out by the user prior to invoking the
``parallel_stereo`` command. Map-projecting the images using ISIS eliminates any
unusual distortion in the image due to the unusual camera acquisition
modes (e.g. pitching "ROTO" maneuvers during image acquisition for MOC,
or highly elliptical orbits and changing line exposure times for the
HRSC). It also eliminates some of the perspective differences in the
image pair that are due to large terrain features by taking the existing
low-resolution terrain model into account (e.g. the :term:`MOLA`, :term:`LOLA`,
:term:`NED`, or :term:`ULCN` 2005 models).
In essence, map-projecting the images results in a pair of very closely
matched images that are as close to ideal as possible given existing
information. This leaves only small perspective differences in the
images, which are exactly the features that the stereo correlation
process is designed to detect.
For this reason, we recommend map-projection for pre-alignment of most
stereo pairs. Its only cost is longer triangulation times as more math
must be applied to work back through the transforms applied to the
images. In either case, the pre-alignment step is essential for
performance because it ensures that the disparity search space is
bounded to a known area. In both cases, the effects of pre-alignment are
taken into account later in the process during triangulation, so you do
not need to worry that pre-alignment will compromise the geometric
integrity of your DEM.
In some cases the pre-processing step may also normalize the pixel
values in the left and right images to bring them into the same
dynamic range. Various options in the ``stereo.default`` file affect
whether or how normalization is carried out, including
``individually-normalize`` and ``force-use-entire-range``. Although
the defaults work in most cases, the use of these normalization
steps can vary from data set to data set, so we recommend you refer
to the examples in :numref:`examples` to see if these are necessary
in your use case.
Finally, pre-processing can perform some filtering of the input
images (as determined by ``prefilter-mode``) to reduce noise and
extract edges in the images. When active, these filters apply a
kernel with a sigma of ``prefilter-kernel-width`` pixels that can
improve results for noisy images (``prefilter-mode`` must be chosen
carefully in conjunction with ``cost-mode``, see :numref:`stereodefault`).
The pre-processing modes that extract image edges are useful for
stereo pairs that do not have the same lighting conditions, contrast,
and absolute brightness :cite:`Nishihara84practical`. We recommend
that you use the defaults for these parameters to start with, and
then experiment only if your results are sub-optimal.
.. _stereo_corr:
Disparity map initialization
----------------------------
Correlation is the process at the heart of the Stereo Pipeline. It is a
collection of algorithms that compute correspondences between pixels in the left
image and pixels in the right image. The map of these correspondences is called
a *disparity map*. This is saved in the file named ``output_prefix-D.tif``
(:numref:`out_corr_files`).
A disparity map is an image whose pixel locations correspond to the pixel
:math:`(u,v)` in the left image, and whose pixel values contain the horizontal
and vertical offsets :math:`(d_u, d_v)` to the matching pixel in the right
image, which is :math:`(u+d_u, v+d_v)`.
The correlation process attempts to find a match for every pixel in the
left image. The only pixels skipped are those marked invalid in the mask
images. For large images (e.g. from HiRISE, LROC, or WorldView), this
is very expensive computationally, so the correlation process is split
into two stages. The disparity map initialization step computes
approximate correspondences using a pyramid-based search that is highly
optimized for speed, but trades resolution for speed. The results of
disparity map initialization are integer-valued disparity estimates. The
sub-pixel refinement step takes these integer estimates as initial
conditions for an iterative optimization and refines them using the
algorithm discussed in the next section.
We employ several optimizations to accelerate disparity map
initialization: (1) a box filter-like accumulator that reduces duplicate
operations during correlation :cite:`Sun02rectangular`; (2)
a coarse-to-fine pyramid based approach where disparities are estimated
using low-resolution images, and then successively refined at higher
resolutions; and (3) partitioning of the disparity search space into
rectangular sub-regions with similar values of disparity determined in
the previous lower resolution level of the pyramid
:cite:`Sun02rectangular`.
.. figure:: images/correlation/correlation_400px.png
:name: correlation_window
:alt: Correlation example
The correlation algorithm in disparity map initialization uses a
sliding template window from the left image to find the best match in
the right image. The size of the template window can be adjusted
using the ``H_KERN`` and ``V_KERN`` parameters in the
``stereo.default`` file, and the search range can be adjusted using
the ``{H,V}_CORR_{MIN/MAX}`` parameters.
Naive correlation itself is carried out by moving a small, rectangular
template window from the left image over the specified search
region of the right image, as in :numref:`correlation_window`. The
"best" match is determined by applying a cost function that compares the
two windows. The location at which the window evaluates to the lowest
cost compared to all the other search locations is reported as the
disparity value. The ``cost-mode`` variable allows you to choose one of
three cost functions, though we recommend normalized cross correlation
:cite:`Menard97:robust`, since it is most robust to slight
lighting and contrast variations between a pair of images. Try the
others if you need more speed at the cost of quality.
.. _d_sub:
Low-resolution disparity
------------------------
Producing the disparity map at full resolution as in :numref:`stereo_corr` is
computationally expensive. To speed up the process, ASP starts by first creating
a low-resolution initial guess version of the disparity map. This is saved
in the file ``output_prefix-D_sub.tif`` (:numref:`out_corr_files`).
Four methods are available for producing this low-resolution disparity,
described below.
.. _d_sub_corr:
Disparity from stereo correlation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The default approach is to use for the low-resolution disparity the same
algorithm as for the full-resolution one, but called with the low-resolution
images ``output_prefix-L_sub.tif`` and ``output_prefix-R_sub.tif``.
Those "sub" images have their size chosen so that their area is around 2.25
megapixels, a size that is easily viewed on the screen unlike the raw source
images.
This corresponds to the ``parallel_stereo`` option ``--corr-seed-mode 1``
(:numref:`stereodefault`).
.. _d_sub_dem:
Disparity from a DEM
~~~~~~~~~~~~~~~~~~~~
The low resolution disparity can be computed from a lower-resolution initial
guess DEM of the area. This works with all alignment methods except ``epipolar``
(:numref:`image_alignment`). Mapprojected images are supported
(:numref:`mapproj-example`).
This option assumes rather good alignment between the cameras and the DEM.
Otherwise see :numref:`ba_pc_align`. The option ``--disparity-estimation-dem-error``
should be used to specify the uncertainty in such a DEM.
This can be useful when there are a lot of clouds, or terrain features are not
seen well at low resolution.
As an example, invoke ``parallel_stereo`` with options along the lines of::
--corr-seed-mode 2 \
--disparity-estimation-dem ref.tif \
--disparity-estimation-dem-error 5
When features are washed out at low resolution, consider also adding the option
``--corr-max-levels 2``, or see :numref:`sparse_disp`.
See :numref:`stereodefault` for more information on these options.
It is suggested to extract the produced low-resolution disparity bands with
``gdal_translate`` (:numref:`mask_disparity`) or ``disparitydebug``
(:numref:`disparitydebug`). Inspect them in ``stereo_gui``
(:numref:`stereo_gui`).
.. _sparse_disp:
Sparse disparity from full-resolution images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For snowy landscapes, whose only features may be small-scale grooves or ridges
sculpted by wind (so-called *zastrugi*), the low-resolution images appear blank,
so the default low-resolution disparity approach in :numref:`d_sub_corr` fails.
One can then use a disparity from a DEM (:numref:`d_sub_dem`), skip the
low-resolution disparity (:numref:`d_sub_skip`), or the approach outlined in
this section, based on the tool named ``sparse_disp``.
This program creates the low-resolution initial disparity
``output_prefix-D_sub.tif`` from the full-resolution images, yet only at a
sparse set of pixels, for reasons of speed. This low-resolution disparity is
then refined as earlier using a pyramid approach, but with fewer levels,
to prevent the features being washed out.
.. figure:: images/examples/sparse_disp.png
:name: fig:sparse-disp-example
:figwidth: 100%
Example of a difficult terrain obtained without (left) and with (right)
``sparse_disp``. (In these DEMs there is very little elevation change,
hence the flat appearance.)
Here is an example:
::
parallel_stereo -t dg --corr-seed-mode 3 \
--corr-max-levels 2 \
left_mapped.tif right_mapped.tif \
12FEB12053305-P1BS_R2C1-052783824050_01_P001.XML \
12FEB12053341-P1BS_R2C1-052783824050_01_P001.XML \
dg/dg srtm_53_07.tif
This tool can be customized with the ``parallel_stereo`` switch
``--sparse-disp-options``.
Installation of sparse_disp
^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``sparse_disp`` tool is written in Python, and makes use of the ``gdal``,
``numpy``, and ``scipy`` packages. To use it, these packages must be installed
with ``conda``.
It is very important to use the same version of ``python``, ``numpy``, and
``gdal`` as in ASP. Hence, make adjustments below and then run::
    conda create -n sparse_disp -c conda-forge \
python=3.12.2 numpy=1.26.4 gdal=3.8.1 scipy
ASP can be told where to look for these packages with a line such as::
    export ASP_PYTHON_MODULES_PATH=$HOME/miniconda3/envs/sparse_disp/lib/python3.12/site-packages
Here, one also needs to adjust appropriately the ``conda`` installation location
and ``python`` version.
If ASP is installed with ``conda`` (:numref:`conda_intro`), and ISIS version is
at least 9.0.0, it will already have all the needed dependencies. Hence, one can
adjust ``ASP_PYTHON_MODULES_PATH`` to point to the ``site-packages`` directory
of the ASP conda environment.
.. _d_sub_skip:
Skip the low-resolution disparity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Any large failure in the low-resolution disparity image will be detrimental to
the performance of the higher resolution disparity. In the event that the
low-resolution disparity is completely unhelpful, it can be skipped by adding
``corr-seed-mode 0`` in the ``stereo.default`` file and using a manual search
range (:numref:`search_range`).
This should only be considered in cases where the texture in an image is
completely lost when subsampled. An example would be satellite images of fresh
snow in the Arctic. Alternatively, ``output_prefix-D_sub.tif`` can be computed
at a sparse set of pixels at full resolution, as described in
:numref:`sparse_disp`.
More on the correlation process
-------------------------------
Debugging disparity map initialization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Not all pixels will be successfully matched during stereo matching,
though a good chunk of the image should be correctly processed. If you
see large areas where matching failed, this could be due to a variety of
reasons:
- In regions where the images do not overlap, there should be no valid
matches in the disparity map.
- Match quality may be poor in regions of the images that have
different lighting conditions, contrast, or specular properties of
the surface.
- Areas that have image content with very little texture or extremely
low contrast may have an insufficient signal to noise ratio, and will
be rejected by the correlator.
- Areas that are highly distorted due to different image perspective,
such as crater and canyon walls, may exhibit poor matching
performance. This could also be due to failure of the preprocessing
step in aligning the images. The correlator can not match images that
are rotated differently from each other or have different
scale/resolution. Mapprojection is used to at least partially rectify
these issues (:numref:`mapproj-example`).
Bad matches, often called "blunders" or "artifacts" are also common, and
can happen for many of the same reasons listed above. The Stereo
Pipeline does its best to automatically detect and eliminate these
blunders, but the effectiveness of these outlier rejection strategies
does vary depending on the quality of the input images.
When tuning up your ``stereo.default`` file, you will find that it is
very helpful to look at the raw output of the disparity map
initialization step. This can be done using the ``disparitydebug`` tool,
which converts the ``output_prefix-D.tif`` file into a pair of normal
images that contain the horizontal and vertical components of disparity.
You can open these in a standard image viewing application and see
immediately which pixels were matched successfully, and which were not.
Stereo matching blunders are usually also obvious when inspecting these
images. With a good intuition for the effects of various
``stereo.default`` parameters and a good intuition for reading the
output of ``disparitydebug``, it is possible to quickly identify and
address most problems.
If you are seeing too many holes in your disparity images, one option
that may give good results is to increase the size of the correlation
kernel used by ``stereo_corr`` with the ``--corr-kernel`` option.
Increasing the kernel size will increase the processing time but should
help fill in regions of the image where no match was found.
.. figure:: images/correlation/stereo_corr_box_compare.png
:name: corr-kernel-size-effect
:alt: Correlation Kernel Size
The effect of increasing the correlation kernel size from 35 (left)
to 75 (right). This location is covered in snow and several regions
lack texture for the correlator to use but a large kernel increases
the chances of finding useful texture for a given pixel.
.. figure:: images/correlation/quantile_filter_result.png
:name: quantile-filtering-effect
:alt: Quantile Filtering
The effect of using the ``rm-quantile`` filtering option in
``stereo_corr``. In the left image there are a series of high
disparity "islands" at the bottom of the image. In the right image
quantile filtering has removed those islands while leaving the rest
of the image intact.
.. _search_range:
Search range determination
~~~~~~~~~~~~~~~~~~~~~~~~~~
In some circumstances, the low-resolution disparity ``D_sub.tif`` computation
may fail, or it may be inaccurate. This can happen for example if only very
small features are present in the original images, and they disappear during the
resampling that is necessary to obtain ``D_sub.tif``.
In this case, it is possible to set ``corr-seed-mode`` from the default of 1 to
the values of 2 or 3, that will use a DEM or sample the full-resolution images
to produce a low-resolution disparity (:numref:`d_sub`).
Or, set ``corr-seed-mode`` to 0, and manually specify a search range to use for
full-resolution correlation via the parameter ``corr-search``. In
``stereo.default`` (:numref:`stereodefault`) this parameter's entry will look
like::
corr-search -80 -2 20 2
The search range can also be set on the command line (:numref:`cmdline`).
The exact values to use with this option you'll have to discover
yourself. These four numbers represent the horizontal
minimum boundary, vertical minimum boundary, horizontal maximum
boundary, and finally the vertical maximum boundary within which we
will search for the disparity during correlation.
It can be tricky to select a good search range. That's why the best
way is to let ``parallel_stereo`` perform an automated determination.
If you think that you can do a better estimate of the search range,
take look at what search ranges ``stereo_corr`` prints in the log files
in the output directory, and examine the intermediate disparity images
using the ``disparitydebug`` program, to figure out which search
directions can be expanded or contracted. The output images will
clearly show good data or bad data depending on whether the search
range is correct.
The worst case scenario is to determine the search range manually. The
aligned ``L.tif`` and ``R.tif`` images (:numref:`outputfiles`) can be
opened in ``stereo_gui`` (:numref:`stereo_gui`), and the coordinates
of points that can be matched visually can be compared. Click on a
pixel to have its coordinates printed in the terminal. Subtract row
and column locations of a feature in the first image from the
locations of the same feature in the second image, and this will yield
offsets that can be used in the search range. Make several of these
offset measurements (for example, for features at higher and then
lower elevations), and use them to define a row and column bounding
box, then expand this by 50% and use it for ``corr-search``. This will
produce good results in most cases.
If the search range produced automatically from the low-resolution
disparity is too big, perhaps due to outliers, it can be tightened
with either ``--max-disp-spread`` or ``--corr-search-limit``, before
continuing with full-resolution correlation (:numref:`stereodefault`).
But note that for very steep terrains and no use of mapprojection a
large search range is expected, and tightening it too much may result
in an inaccurate disparity.
.. _subpixel:
Sub-pixel refinement
--------------------
Once disparity map initialization is complete, every pixel in the
disparity map will either have an estimated disparity value, or it will
be marked as invalid. All valid pixels are then adjusted in the
sub-pixel refinement stage based on the ``subpixel-mode`` setting.
The first mode is parabola-fitting sub-pixel refinement
(``subpixel-mode 1``). This technique fits a 2D parabola to points on
the correlation cost surface in an 8-connected neighborhood around the
cost value that was the "best" as measured during disparity map
initialization. The parabola's minimum can then be computed analytically
and taken as the new sub-pixel disparity value.
This method is easy to implement and extremely fast to compute, but it
exhibits a problem known as pixel-locking: the sub-pixel disparities
tend toward their integer estimates and can create noticeable "stair
steps" on surfaces that should be smooth
:cite:`Stein06:attenuating,Szeliski03sampling`. See for
example :numref:`parabola_subpixel`.
Furthermore, the parabola subpixel mode is not capable of refining a
disparity estimate by more than one pixel, so although it produces
smooth disparity maps, these results are not much more accurate than the
results that come out of the disparity map initialization in the first
place. However, the speed of this method makes it very useful as a
"draft" mode for quickly generating a DEM for visualization (i.e.
non-scientific) purposes. It is also beneficial in the event that a user
will simply downsample their DEM after generation in Stereo Pipeline.
.. figure:: images/correlation/parabola_results.png
:name: parabola_subpixel
Left: Input images. Center: results using the parabola draft
subpixel mode (subpixel-mode = 1). Right: results using the Bayes
EM high quality subpixel mode (subpixel-mode = 2).
For high quality results, we recommend ``subpixel-mode 2``: the Bayes EM
weighted affine adaptive window correlator. This advanced method
produces extremely high quality stereo matches that exhibit a high
degree of immunity to image noise. For example Apollo Metric Camera
images are affected by two types of noise inherent to the scanning
process: (1) the presence of film grain and (2) dust and lint particles
present on the film or scanner. The former gives rise to noise in the
DEM values that wash out real features, and the latter causes incorrect
matches or hard to detect blemishes in the DEM. Attenuating the effect
of these scanning artifacts while simultaneously refining the integer
disparity map to sub-pixel accuracy has become a critical goal of our
system, and is necessary for processing real-world data sets such as the
Apollo Metric Camera data.
The Bayes EM subpixel correlator also features a deformable template
window from the left image that can be rotated, scaled, and translated
as it zeros in on the correct match in the right image. This adaptive
window is essential for computing accurate matches on crater or canyon
walls, and on other areas with significant perspective distortion due to
foreshortening.
This affine-adaptive behavior is based on the Lucas-Kanade template
tracking algorithm, a classic algorithm in the field of computer vision
:cite:`Baker04:lucas-kanade`. We have extended this
technique; developing a Bayesian model that treats the Lucas-Kanade
parameters as random variables in an Expectation Maximization (EM)
framework. This statistical model also includes a Gaussian mixture
component to model image noise that is the basis for the robustness of
our algorithm. We will not go into depth on our approach here, but we
encourage interested readers to read our papers on the topic
:cite:`nefian:bayes_em,broxton:isvc09`.
However we do note that, like the computations in the disparity map
initialization stage, we adopt a multi-scale approach for sub-pixel
refinement. At each level of the pyramid, the algorithm is initialized
with the disparity determined in the previous lower resolution level of
the pyramid, thereby allowing the subpixel algorithm to shift the
results of the disparity initialization stage by many pixels if a better
match can be found using the affine, noise-adapted window. Hence, this
sub-pixel algorithm is able to significantly improve upon the results to
yield a high quality, high resolution result.
Another option when run time is important is ``subpixel-mode 3``: the
simple affine correlator. This is essentially the Bayes EM mode with the
noise correction features removed in order to decrease the required run
time. In data sets with little noise this mode can yield results similar
to Bayes EM mode in approximately one fifth the time.
A different option is Phase Correlation, ``subpixel-mode 4``, which
implements the algorithm from :cite:`guizar2008efficient`.
It is slow and does not work well on slopes but since the algorithm is
very different it might perform in situations where the other algorithms
are not working well.
Triangulation
-------------
When running an ISIS session, the Stereo Pipeline uses geometric camera
models available in ISIS :cite:`anderson08:isis`. These
highly accurate models are customized for each instrument that ISIS
supports. Each ISIS "cube" file contains all of the information that is
required by the Stereo Pipeline to find and use the appropriate camera
model for that observation.
Other sessions such as DG (*DigitalGlobe*) or Pinhole, require that
their camera model be provided as additional arguments to the ``parallel_stereo``
command. Those camera models come in the form of an XML document for DG
and as ``*.pinhole, *.tsai, *.cahv, *.cahvor`` for Pinhole sessions.
Those files must be the third and fourth arguments or immediately follow
after the two input images for ``parallel_stereo``.
.. figure:: images/correlation/camera_models.png
:name: camera_models
:alt: Camera Models
Most remote sensing cameras fall into two generic categories
based on their basic geometry. Framing cameras (left) capture an
instantaneous two-dimensional image. Linescan cameras (right)
capture images one scan line at a time, building up an image over
the course of several seconds as the satellite moves through the
sky.
ISIS camera models account for all aspects of camera geometry, including
both intrinsic (i.e. focal length, pixel size, and lens distortion) and
extrinsic (e.g. camera position and orientation) camera parameters.
Taken together, these parameters are sufficient to "forward project" a
3D point in the world onto the image plane of the sensor. It is also
possible to "back project" from the camera's center of projection
through a pixel corresponding to the original 3D point.
.. figure:: images/correlation/triangulation_400px.png
:name: triangulation
:alt: Triangulation
Once a disparity map has been generated and refined, it can be used
in combination with the geometric camera models to compute the
locations of 3D points on the surface of Mars. This figure shows the
position (at the origins of the red, green, and blue vectors) and
orientation of the Mars Global Surveyor at two points in time where
it captured images in a stereo pair.
Notice, however, that forward and back projection are not symmetric
operations. One camera is sufficient to "image" a 3D point onto a pixel
located on the image plane, but the reverse is not true. Given only a
single camera and a pixel location :math:`x = (u,v),` that is the image
of an unknown 3D point :math:`P = (x,y,z)`, it is only possible to
determine that :math:`P` lies somewhere along a ray that emanates from
the camera center through the pixel location :math:`x`
on the image plane (see :numref:`camera_models`).
Alas, once images are captured, the route from image pixel back to
3D points in the real world is through back projection, so we must
bring more information to bear on the problem of uniquely reconstructing
our 3D point. In order to determine :math:`P` using back projection,
we need *two* cameras that both contain pixel locations :math:`x_1`
and :math:`x_2` where :math:`P` was imaged. Now, we have two rays
that converge on a point in 3D space (see :numref:`triangulation`).
The location where they meet must be the original location of
:math:`P`.
.. _triangulation_error:
Triangulation error
~~~~~~~~~~~~~~~~~~~
In practice, the rays emanating from matching pixels in the cameras
rarely intersect perfectly on the ground because any slight error in
the position or pointing information of the cameras will affect the
accuracy of the rays. The matching (correlation) among the images is
also not perfect, contributing to the error budget. Then, we take the
*closest point of intersection* of the two rays as the location of the
intersection point :math:`P`.
Additionally, the actual shortest distance between the rays at this
point is an interesting and important error metric that measures how
self-consistent our two camera models are for this point. It will be
seen in the next chapter that this information, when computed and
averaged over all reconstructed 3D points, can be a valuable statistic
for determining whether to carry out bundle adjustment
(:numref:`bundle_adjust`).
The distance between the two rays emanating from matching points in
the cameras at their closest intersection is recorded in the fourth
channel of the point cloud file, ``output-prefix-PC.tif``. This is
called the *triangulation error*, or the *ray intersection error*. It
is measured in meters. This error can be gridded when a DEM is created
from the point cloud by using the ``--errorimage`` argument on the
``point2dem`` command (:numref:`point2dem`).
This error *is not* the true accuracy of the DEM. It is only another
indirect measure of quality. A DEM with high triangulation error, as
compared to the ground sample distance, is always bad and should have
its images bundle-adjusted. A DEM with low triangulation error is at
least self-consistent, but could still be bad, or at least
misaligned.
If, after bundle adjustment, the triangulation error is still high at
the image corners and the inputs are Pinhole cameras, one may have to
refine the intrinsics, including the distortion model.
:numref:`bundle_adjustment` discusses bundle adjustment, including
optimizing the intrinsics.
To improve the location of a triangulated point cloud or created DEM
relative to a known ground truth, use alignment (:numref:`pc_align`).
See :numref:`error_propagation` for another metric qualifying
the accuracy of a point cloud or DEM, namely the horizontal and vertical
uncertainty, as propagated from the input cameras.
.. _mapproj_with_cam2map:
Stereo with images mapprojected using ISIS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a continuation of the discussion at :numref:`moc_tutorial`. It
describes how to mapproject the input images using the ISIS tool
``cam2map`` and how to run stereo with the obtained
images. Alternatively, the images can be mapprojected using ASP
itself, per :numref:`mapproj-example`.
Mapprojection can result in improved results for steep slopes, when
the images are taken from very different perspectives, or if the
curvature of the planet/body being imaged is non-negligible.
We will now describe how this works, but we also provide the
``cam2map4stereo.py`` program (:numref:`cam2map4stereo.py`) which does
this automatically.
The ISIS ``cam2map`` program will map-project these images::
ISIS> cam2map from=M0100115.cub to=M0100115.map.cub
ISIS> cam2map from=E0201461.cub to=E0201461.map.cub \
map=M0100115.map.cub matchmap=true
At this stage we can run the stereo program with map-projected images:
::
ISIS> parallel_stereo E0201461.map.cub M0100115.map.cub \
--alignment-method none -s stereo.default.example \
results/output
Here we have used ``alignment-method none`` since ``cam2map4stereo.py``
brought the two images into the same perspective and using the same
resolution. If you invoke ``cam2map`` independently on the two images,
without ``matchmap=true``, their resolutions may differ, and using an
alignment method rather than ``none`` to correct for that is still
necessary.
Now you may skip to chapter :numref:`nextsteps` which will discuss the
``parallel_stereo`` program in more detail and the other tools in ASP.
Or, you can continue reading below for more details on mapprojection.
Advanced discussion of mapprojection
------------------------------------
Notice the order in which the images were run through ``cam2map``. The
first projection with ``M0100115.cub`` produced a map-projected image
centered on the center of that image. The projection of ``E0201461.cub``
used the ``map=`` parameter to indicate that ``cam2map`` should use the
same map projection parameters as those of ``M0100115.map.cub``
(including center of projection, map extents, map scale, etc.) in
creating the projected image. By map-projecting the image with the worse
resolution first, and then matching to that, we ensure two things: (1)
that the second image is summed or scaled down instead of being
magnified up, and (2) that we are minimizing the file sizes to make
processing in the Stereo Pipeline more efficient.
Technically, the same end result could be achieved by using the
``mocproc`` program alone, and using its ``map= M0100115.map.cub``
option for the run of ``mocproc`` on ``E0201461.cub`` (it behaves
identically to ``cam2map``). However, this would not allow for
determining which of the two images had the worse resolution and
extracting their minimum intersecting bounding box (see below).
Furthermore, if you choose to conduct bundle adjustment (see
:numref:`bundle_adjustment`) as a pre-processing step, you would
do so between ``mocproc`` (as run above) and ``cam2map``.
The above procedure is in the case of two images which cover similar
real estate on the ground. If you have a pair of images where one image
has a footprint on the ground that is much larger than the other, only
the area that is common to both (the intersection of their areas) should
be kept to perform correlation (since non-overlapping regions don't
contribute to the stereo solution).
ASP normally has no problem identifying the shared area and still
runs well. Below we describe, for the adventurous user, some
fine-tuning of this procedure.
If the image with the larger footprint size also happens to be the
image with the better resolution (i.e. the image run through
``cam2map`` second with the ``map=`` parameter), then the above
``cam2map`` procedure with ``matchmap=true`` will take care of it just
fine. Otherwise you'll need to figure out the latitude and longitude
boundaries of the intersection boundary (with the ISIS ``camrange``
program). Then use that smaller boundary as the arguments to the
``MINLAT``, ``MAXLAT``, ``MINLON``, and ``MAXLON`` parameters of the
first run of ``cam2map``. So in the above example, after ``mocproc``
with ``Mapping= NO`` you'd do this:
::
ISIS> camrange from=M0100115.cub
... lots of camrange output omitted ...
Group = UniversalGroundRange
LatitudeType = Planetocentric
LongitudeDirection = PositiveEast
LongitudeDomain = 360
MinimumLatitude = 34.079818835324
MaximumLatitude = 34.436797628116
MinimumLongitude = 141.50666207418
MaximumLongitude = 141.62534719278
End_Group
... more output of camrange omitted ...
::
ISIS> camrange from=E0201461.cub
... lots of camrange output omitted ...
Group = UniversalGroundRange
LatitudeType = Planetocentric
LongitudeDirection = PositiveEast
LongitudeDomain = 360
MinimumLatitude = 34.103893080982
MaximumLatitude = 34.547719435156
MinimumLongitude = 141.48853937384
MaximumLongitude = 141.62919740048
End_Group
... more output of camrange omitted ...
Now compare the boundaries of the two above and determine the
intersection to use as the boundaries for ``cam2map``:
::
ISIS> cam2map from=M0100115.cub to=M0100115.map.cub \
DEFAULTRANGE=CAMERA MINLAT=34.10 MAXLAT=34.44 \
MINLON=141.50 MAXLON=141.63
ISIS> cam2map from=E0201461.cub to=E0201461.map.cub \
map=M0100115.map.cub matchmap=true
You only have to do the boundaries explicitly for the first run of
``cam2map``, because the second one uses the ``map=`` parameter to mimic
the map-projection of the first. These two images are not radically
different in spatial coverage, so this is not really necessary for these
images, it is just an example.
Again, unless you are doing something complicated, using the
``cam2map4stereo.py`` (:numref:`cam2map4stereo.py`) will take care of
all these steps for you.
.. _local_alignment_issues:
Identifying issues in local alignment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Stereo with local epipolar alignment (:numref:`running-stereo`) can
perform better than with global affine epipolar alignment. Yet, when stereo
fails on a locally aligned tile pair, it is instructive to understand
why. Usually it is because the images are difficult at
that location, such as due to very steep terrain, clouds, shadows, etc.
For a completed ``parallel_stereo`` run which failed in a portion, the
first step is to identify the offending tile directory. For that, open the
produced DEM in ``stereo_gui``, and use the instructions at
:numref:`image_bounds` to find the approximate longitude, latitude,
and height at the problematic location.
Then run ``stereo_parse`` with the same options as ``parallel_stereo``
and the flag::
--tile-at-location 'lon lat height'
This should print on the screen a text like::
Tile with location: run/run-2048_3072_1024_1024
If a run failed to complete, find the most recent output tile
directories that were being worked on, based on modification time, and
investigate one of them.
In either case, given a candidate for a problematic tile, from the log
file of ``stereo_corr`` in that tile's directory you can infer the full
correlation command that failed. Re-run it, while appending the option::
--local-alignment-debug
Images and interest point matches before and after alignment will be
saved. Those can be examined as::
stereo_gui <tile-prefix>-left-crop.tif <tile-prefix>-right-crop.tif \
  <tile-prefix>-left-crop__right-crop.match
and::
stereo_gui <tile-prefix>-left-aligned-tile.tif \
  <tile-prefix>-right-aligned-tile.tif \
  <tile-prefix>-left-aligned-tile__right-aligned-tile.match
================================================
FILE: docs/environment.yml
================================================
# conda environment needed to build the docs.
channels:
- conda-forge
- defaults
dependencies:
- sphinx
- sphinxcontrib-bibtex
================================================
FILE: docs/error_propagation.rst
================================================
.. _error_propagation:
Error propagation
=================
At the triangulation stage, ``parallel_stereo`` can propagate the
errors (uncertainties, standard deviations, covariances) from the
input cameras, computing the horizontal and vertical standard
deviation (stddev) of the uncertainty for each triangulated
point. This is enabled with the option ``--propagate-errors``
(:numref:`stereo-default-error-propagation`).
Bundle adjustment can propagate uncertainties as well
(:numref:`ba_error_propagation`).
The produced uncertainties can be exported from the point cloud to DEM and/or
LAS files (:numref:`export_stddev`).
The input uncertainties
-----------------------
The input uncertainties can be either numbers that are passed on the
command line, or, otherwise, for a few camera models, they can be read
from the camera files.
If the option ``--horizontal-stddev`` is set, with two positive
numbers as values, representing the left and right camera stddev of
position uncertainty in the local horizontal ground plane having the
triangulated point, then these values will be used. The input stddev
values are measured in meters. This functionality works with any
cameras supported by ASP.
If this option is not set, the following strategies are used:
- For Pleiades 1A/1B linescan camera models (:numref:`pleiades`)
(but not for NEO, :numref:`pleiades_neo`) the
``ACCURACY_STDV`` field is read from the "DIM" XML file for each camera
(in the *Absolute Horizontal Accuracy* section of the camera
model), and it is used as the horizontal stddev.
- For RPC cameras (:numref:`rpc`), the values ``ERRBIAS`` and ``ERRRAND`` are
read, whether set in XML files or as part of metadata that GDAL understands.
The square root of sum of squares of these quantities is the input horizontal
stddev for a camera.
- For Maxar (DigitalGlobe) linescan cameras (:numref:`dg_tutorial`),
the inputs are the satellite position and orientation covariances,
read from the ``EPHEMLIST`` and ``ATTLIST`` fields. These are
propagated from the satellites to the ground and then through
triangulation.
For datasets with a known CE90 measure, or in general a
:math:`CE_X` measure, where :math:`X` is between 0% and 100%,
use the ``--horizontal-stddev`` option, with values computed
using the formula:
.. math::
StdDev = CE_X/\sqrt{-2 \ln(1-X/100.0)}
(This formula converts a circular error percentile into a standard
deviation, assuming a Rayleigh distribution of the horizontal radial error.)
In all cases, the error propagation takes into account whether the cameras are
bundle-adjusted or not (:numref:`bundle_adjust`), and if the images are
mapprojected (:numref:`mapproj-example`).
Produced uncertainty for triangulated points
--------------------------------------------
The triangulation covariance matrix is computed in the local North-East-Down
(NED) coordinates at each nominal triangulated point, and further decomposed
into the horizontal and vertical components (:numref:`produced_covariances`).
The square root is taken, creating the horizontal and vertical standard
deviations, that are saved as the 5th and 6th band in the point cloud
(\*-PC.tif file, :numref:`outputfiles`). Running ``gdalinfo``
(:numref:`gdal_tools`) on the point cloud will show some metadata describing
each band in that file.
The computed stddev values are in units of meter.
Bundle adjustment
-----------------
Error propagation is also implemented in ``bundle_adjust``
(:numref:`ba_error_propagation`). In that case, the errors are computed at each
interest point, rather than densely.
The same underlying logic is employed as for stereo.
.. _export_stddev:
Export to DEM and LAS
---------------------
The stddev values in the point cloud can then be gridded with ``point2dem``
(:numref:`point2dem`) with the option ``--propagate-errors``, using the same
algorithm as for computing the DEM heights.
Example::
point2dem \
--t_srs <projection string> \
--tr <grid size> \
--propagate-errors \
run/run-PC.tif
This will produce the files ``run/run-HorizontalStdDev.tif`` and
``run/run-VerticalStdDev.tif`` alongside the output DEM, ``run/run-DEM.tif``.
In all these files the values are in units of meter.
The ``point2las`` program (:numref:`point2las`) can export the horizontal and
vertical stddev values from the point cloud to a LAS file.
Implementation details
----------------------
Note that propagating the errors subtly changes the behavior of stereo
triangulation, and hence also the output DEM. Triangulated points
are saved with a float precision of 1e-8 meters (rather than the usual
1e-3 meters or so, :numref:`triangulation_options`), to avoid creating
step artifacts later when gridding the rather slowly varying
propagated errors.
When error propagation is enabled, the triangulated point cloud stores
6 bands instead of the usual 4 (:numref:`outputfiles`), and the LZW
compression is somewhat less efficient since more digits of precision
are stored. The size of the point cloud roughly doubles. This does not
affect the size of the DEM, but its values and extent may change
slightly.
.. _uncertainty_vs_triangulation_err:
What the produced uncertainties are not
---------------------------------------
The horizontal and vertical stddev values created by stereo
triangulation and later gridded by ``point2dem`` measure the
uncertainty of each nominal triangulated point, given the
uncertainties in the input cameras.
This is not the discrepancy between this point's location as compared
to a known ground truth. If the input cameras are translated by the
same amount in the ECEF coordinate system, the triangulated point
position can change a lot, but the produced uncertainties will change
very little. To estimate and correct a point cloud's geolocation
invoke an alignment algorithm (:numref:`pc_align`).
The produced uncertainties are not a measure of the pointing accuracy
(:numref:`triangulation_error`). Whether the rays from the cameras
meet at the nominal triangulated point perfectly, or their closest
distance is, for example, 5 meters, the produced uncertainties around
the nominal point will be about the same. See a comparison between
these errors in :numref:`grand_mesa_dem_intersection_err` and
:numref:`horizontal_vertical_error`.
The pointing accuracy can be improved by using bundle adjustment
(:numref:`bundle_adjust`) and solving for jitter
(:numref:`jitter_solve`).
Example
-------
For Maxar (DigitalGlobe) linescan cameras::
parallel_stereo \
--alignment-method local_epipolar \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
-t dg \
--propagate-errors \
left.tif right.tif left.xml right.xml \
run/run
proj="+proj=utm +zone=13 +datum=WGS84 +units=m +no_defs"
point2dem --tr 1.6 \
--t_srs "$proj" \
--propagate-errors \
run/run-PC.tif
The projection and grid size above are dependent on the dataset. For steep
slopes, consider using mapprojection (:numref:`mapproj-example`).
Alternatively, the input horizontal stddev values for the cameras
can be set as::
--horizontal-stddev 1.05 1.11
Then these will be used instead. This last approach works for any
orbital camera model supported by ASP (:numref:`examples`).
.. figure:: images/grand_mesa_dem_intersection_err.png
:name: grand_mesa_dem_intersection_err
A hillshaded DEM created with DigitalGlobe WorldView images for
Grand Mesa, Colorado (left), and the triangulation error
(:numref:`triangulation_error`) in meters (right). The input images were
mapprojected (:numref:`mapproj-example`). No bundle adjustment was
used. Jitter (:numref:`jitter_solve`) is noticeable.
.. figure:: images/horizontal_vertical_error.png
:name: horizontal_vertical_error
Produced horizontal and vertical stddev values (left and right) for the
same dataset. It can be seen from the scales (units are in meter)
and comparing with :numref:`grand_mesa_dem_intersection_err` that these
errors vary little overall, and depend more on the geometry
of the stereo pair than the underlying terrain. See
:numref:`uncertainty_vs_triangulation_err` for a discussion.
.. _produced_covariances:
Definitions
-----------
The vertical variance of a triangulated point is defined as the
lower-right corner of the 3x3 NED covariance matrix (since x=North,
y=East, z=Down).
To find the horizontal variance component, consider the upper-left
:math:`2 \times 2` block of that matrix. Geometrically, the
horizontal covariances represent an ellipse. The radius of the circle
with the same area is found, which is the square root of the product
of ellipse semiaxes, which is the product of the eigenvalues of this
symmetric matrix, or its determinant. So, the horizontal component
of the covariance is defined as the square root of the determinant
of the upper-left :math:`2 \times 2` block of the NED covariance matrix.
The square root is taken to go from variance to stddev.
Theory
------
According to the theory of `propagation of uncertainty
`_, given a
function :math:`y = f(x)` between multi-dimensional spaces, the
covariances of the inputs and outputs are related via
.. math::
Cov_Y = J Cov_X J^T
Here, :math:`J` is the Jacobian of the function :math:`f` and
:math:`J^T` is its transpose. It is assumed that the uncertainties are
small enough that this function can be linearized around the nominal
location.
For this particular application, the input variables are either the
coordinates in the local horizontal ground plane having the
triangulated point (two real values for each camera), or the satellite
positions and orientations (quaternions), which are 7 real values for
each camera. The output is the triangulated point in the local
North-East-Down coordinates.
If the input uncertainties are stddev values, then these are squared,
creating variances, before being propagated (then converted back to
stddev values at the last step).
The Jacobian was computed using centered finite
differences, with a step size of 0.01 meters for the position and 1e-6
for the (normalized) quaternions. The computation was not particularly
sensitive to these step sizes. A much smaller position step size is
not recommended, since the positions are on the order of 7e6 meters,
(being measured from planet center) and because double precision
computations have only 16 digits of precision.
Validation for Maxar (DigitalGlobe) linescan cameras
----------------------------------------------------
The horizontal stddev values propagated through triangulation for Maxar
(DigitalGlobe) linescan cameras are usually on the order of 3 meters.
The obtained vertical stddev varies very strongly with the convergence
angle, and is usually 5-10 meters, and perhaps more for stereo pairs
with a convergence angle under 30 degrees.
The dependence on the convergence angle is to be expected. But these
numbers appear too large given the ground sample distance of
DigitalGlobe WorldView cameras. We are very confident that they are
correct. The results are so large because of the input orientation
covariances (the relative contribution of input position and
orientation covariances can be determined with the options
``--position-covariance-factor`` and
``--orientation-covariance-factor``).
The curious user can try the following independent approach to
validate these numbers. The linescan camera files in XML format have
the orientations on lines with the ``ATTLIST`` field. The numbers on
that line are measurement index, then the quaternions (4 values, in
order x, y, z, w) and the upper-right half of the 4x4 covariance
matrix (10 numbers, stored row-wise).
The ``w`` variance (the last number), can be, for example, on the
order of 6.3e-12. Its square root, the standard deviation, which is
2.5e-6 or so, is the expected variability in the ``w`` component of
the quaternion.
Fetch and save the Python script `bias_dg_cam.py
`_. Invoke
it as::
python bias_dg_cam.py --position-bias "0 0 0" \
--orientation-bias "0 0 0 2.5e-6" \
-i left.xml -o left_bias.xml
python bias_dg_cam.py --position-bias "0 0 0" \
--orientation-bias "0 0 0 -2.5e-6" \
-i right.xml -o right_bias.xml
This will bias the positions and quaternions in the camera files by
the given amounts, creating ``left_bias.xml`` and
``right_bias.xml``. Note that values with different sign were used in
the two camera files. It is instructive to compare the original and
produced camera files side-by-side, and see the effect of using a
different sign and magnitude for the biases.
Then, ``parallel_stereo`` can be run twice, with different output
prefixes, first with the original cameras, and then the biased ones,
in both cases without propagation of errors. Use
``--left-image-crop-win`` and ``--right-image-crop-win``
(:numref:`stereo_gui`) to run on small clips only.
The created DEMs (with nominal and then with biased cameras) can have
their heights compared using the ``geodiff --absolute`` command
(:numref:`geodiff`). We found a height difference that is very similar
to the vertical standard deviation produced earlier.
================================================
FILE: docs/examples/apollo15.rst
================================================
.. _apollo15_example:
Apollo 15 Metric Camera images
------------------------------
Apollo Metric images were all taken at regular intervals, which means
that the same ``stereo.default`` can be used for all sequential pairs of
images. Apollo Metric images are ideal for stereo processing. They
produce consistent, excellent results.
The scans performed by ASU are sufficiently detailed to exhibit film
grain at the highest resolution. The amount of noise at the full
resolution is not helpful for the correlator, so we recommend
subsampling the images by a factor of 4.
Currently the tools to ingest Apollo TIFFs into ISIS are not available,
but these images should soon be released into the PDS for general public
usage.
Ansgarius C
~~~~~~~~~~~
Ansgarius C is a small crater on the west edge of the far side of the
Moon near the equator. It is east of Kapteyn A and B.
.. figure:: ../images/examples/metric/metric_ge_example_combined.png
:name: metric_example
Example output possible with Apollo Metric frames AS15-M-2380 and AS15-M-2381.
Commands
^^^^^^^^
Process Apollo TIFF files into ISIS.
::
ISIS> reduce from=AS15-M-2380.cub to=sub4-AS15-M-2380.cub \
sscale=4 lscale=4
ISIS> reduce from=AS15-M-2381.cub to=sub4-AS15-M-2381.cub \
sscale=4 lscale=4
ISIS> spiceinit from=sub4-AS15-M-2380.cub
ISIS> spiceinit from=sub4-AS15-M-2381.cub
ISIS> parallel_stereo sub4-AS15-M-2380.cub sub4-AS15-M-2381.cub \
result/output
See :numref:`nextsteps` for a discussion about various speed-vs-quality choices.
================================================
FILE: docs/examples/aster.rst
================================================
.. _aster:
ASTER
-----
`Advanced Spaceborne Thermal Emission and Reflection Radiometer
`_ (ASTER)
is a Japanese instrument. ASP can process ASTER Level 1A VNIR images. These are
acquired with a stereo rig consisting of two cameras, pointing nadir and back. The
orbit is sun-synchronous, at an elevation of 705 km. The ground sample distance is 15
meters/pixel.
See a `ready-made ASTER example
`_.
It has the input images and cameras, ASP outputs, and instructions for how to
run it. Also see a `workbook with illustrations
`_.
ASP can correct for the jitter in these cameras (:numref:`jitter_aster`).
.. _aster_fetch:
Fetching the data
^^^^^^^^^^^^^^^^^
ASTER satellite images are freely available from:
https://search.earthdata.nasa.gov/search
When visiting that page, select a region on the map, search for ``AST_L1A,`` and
choose ``ASTER L1A Reconstructed Unprocessed Instrument Data V004``.
If too many results are shown, narrow down the choices by using a range in time
or deselecting unwanted items manually. Examining the data thumbnails is
helpful, to exclude those with clouds, etc. Then click to download.
As of end of 2025, the products can only be downloaded in the HDF-EOS format,
which requires an ASP build from 2026-01 or later (:numref:`release`).
Note that some datasets may not contain the bands 3B and 3N needed for stereo.
The EarthData web site also offers pre-existing ASTER Global DEM (GDEM)
products.
Data preparation
^^^^^^^^^^^^^^^^
In this example we will use the dataset::
AST_L1A_00404012022185436_20250920182851.hdf
around the San Luis Reservoir in Northern California.
This dataset contains all image data and metadata in a single .hdf file. It can be
extracted with ``aster2asp`` (:numref:`aster2asp`) as::
aster2asp input.hdf -o out
Older V003 datasets were provided as zipped files containing data directories
with TIFF images and metadata as text files. In that case, after the data is
extracted, the preparation command is::
aster2asp dataDir -o out
In either case, four files would be produced, named::
out-Band3N.tif out-Band3B.tif out-Band3N.xml out-Band3B.xml
We refer again to the tool's documentation page regarding details of how
these files were created.
Open the images in ``stereo_gui`` (:numref:`stereo_gui`) as::
stereo_gui out-Band3N.tif out-Band3B.tif
and ensure that they are of good quality, or else get another dataset.
Stereo with raw images
^^^^^^^^^^^^^^^^^^^^^^
As of build 2026/3 (:numref:`release`), ASTER data are always processed with the
CSM model (:numref:`csm`).
Run ``parallel_stereo`` (:numref:`parallel_stereo`)::
parallel_stereo -t aster \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
out-Band3N.tif out-Band3B.tif \
out-Band3N.xml out-Band3B.xml \
out_stereo/run
This uses the ``asp_mgm`` algorithm, which is the most accurate algorithm ASP
has. One can also try the option ``--subpixel-mode 2`` which will be much slower
but produce better results.
See :numref:`nextsteps` for a discussion about various stereo algorithms and
speed-vs-quality choices.
This is followed by DEM creation with ``point2dem`` (:numref:`point2dem`)::
point2dem -r earth --auto-proj-center \
out_stereo/run-PC.tif
This will create a DEM named ``out_stereo/run-DEM.tif`` using an auto-guessed
local UTM or polar stereographic projection (:numref:`point2dem_proj`), with an
auto-guessed resolution (about 15 m / pixel, the image ground sample distance).
Visualize the DEM with ``stereo_gui`` (:numref:`stereo_gui`)::
stereo_gui --hillshade out_stereo/run-DEM.tif
Stereo with mapprojected images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To improve the results for steep terrain, one may consider doing stereo as
before, followed by mapprojection onto a coarser and smoother version of the
obtained DEM, and then redoing stereo with mapprojected images (per the
suggestions in :numref:`mapproj-example`).
Initial stereo::
parallel_stereo -t aster \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
out-Band3N.tif out-Band3B.tif \
out-Band3N.xml out-Band3B.xml \
out_stereo/run
Create a low-resolution smooth DEM at 200 meters/pixel::
point2dem -r earth --auto-proj-center \
--tr 200 out_stereo/run-PC.tif \
-o out_stereo/run-200m
Mapproject onto this DEM at 15 meters/pixel::
mapproject --tr 15 \
out_stereo/run-200m-DEM.tif \
out-Band3N.tif out-Band3N.xml out-Band3N_proj.tif
mapproject --tr 15 \
out_stereo/run-200m-DEM.tif \
out-Band3B.tif out-Band3B.xml out-Band3B_proj.tif
Run parallel_stereo with the mapprojected images::
parallel_stereo -t aster \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
out-Band3N_proj.tif out-Band3B_proj.tif \
out-Band3N.xml out-Band3B.xml \
out_stereo_proj/run \
out_stereo/run-200m-DEM.tif
Create the final DEM::
point2dem -r earth --auto-proj-center \
out_stereo_proj/run-PC.tif
It is very important to use the same resolution (option ``--tr``) for both
images when mapprojecting. This helps make the resulting images more similar
and reduces the processing time (:numref:`mapproj-res`).
One could consider mapprojecting at a higher resolution, for example, at 10
meters/pixel.
It is suggested to also create and inspect the triangulation error image
(:numref:`point2dem`). If it is large (comparable to ground sample distance),
the cameras should be bundle-adjusted first (:numref:`bundle_adjust`).
See :numref:`aster_dem_ortho_error` for an illustration.
Stereo with ortho-ready L1B images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
ASTER L1B images are also available. These are produced by projecting L1A images
onto the WGS84 ellipsoid at zero elevation. ASTER L1B images can be processed
with ASP using the workflow for ortho-ready images (:numref:`mapproj_ortho`).
Invoke ``parallel_stereo`` with the L1B images (already mapprojected), L1A
cameras, output prefix, and the option ``--ortho-heights 0 0``. The results are
nearly the same as obtained with L1A images.
.. _aster_csm:
The CSM model
^^^^^^^^^^^^^
An ASTER camera model consists of a sequence of satellite position samples and a
set of camera directions (sight vectors, in world coordinates), sampled at about
a dozen image rows and columns.
ASP fits a CSM linescan model (:numref:`csm`) on-the-fly to the ASTER sight
vectors and satellite positions. Instead of a set of directions on a grid, there
is one camera orientation at each satellite position sample. This enables
solving for jitter in ASTER cameras (:numref:`jitter_aster`).
The CSM model is produced by optimizing the optical center, focal length, and
camera orientations, to fit best the provided ASTER sight vectors. No ground
information is used, or stereo pair knowledge. The satellite positions do not
change.
The bundle adjustment program (:numref:`bundle_adjust`) will optimize and save
the produced CSM models (:numref:`csm_state`). To save the best-fit CSM models
with no further refinement, invoke this tool with zero iterations.
The CSM model may be further refined by tying together multiple datasets and
using ground constraints (:numref:`kaguya_ba`).
.. _aster_rpc:
Using the RPC model
^^^^^^^^^^^^^^^^^^^
ASTER XML files also contain RPC coefficients. These can be used with
``-t rpc`` (:numref:`rpc`) instead of ``-t aster``. The RPC model is an
approximation and less accurate than the exact linescan model described above.
Use the exact model (``-t aster``) for best results.
================================================
FILE: docs/examples/bathy.rst
================================================
.. include:: <isonum.txt>
.. _bathy_intro:
Shallow-water bathymetry
------------------------
ASP supports creation of terrain models where parts of the terrain are
under water. Assuming that the water is shallow, still, clear, with
sufficient texture to match at the water bottom between the left and
right images, the rays emanating from the cameras and converging at
those features will be bent according to Snell's law at the water
interface, hence determining correctly the position of underwater
terrain.
The bathymetry module is evaluated in :cite:`palaseanu2021bathy` and
:cite:`palaseanu2023`.
Software considerations
~~~~~~~~~~~~~~~~~~~~~~~
ASP supports the bathymetry mode only with the ``dg``, ``rpc``, and
``nadirpinhole`` sessions, so with Digital Globe linescan cameras, RPC cameras,
and pinhole cameras (:numref:`bathy_non_dg`), all for Earth, with the WGS84
datum. Both raw and mapprojected images can be used (:numref:`bathy_map`), with
or without bundle adjustment or alignment (:numref:`bathy_ba_align`).
Physics considerations
~~~~~~~~~~~~~~~~~~~~~~
Shallow water does not appear equally transparent at all wavelengths,
which will affect the quality of the results. While the process we
outline below will work, in principle, with any data, we will focus on
stereo with the G band (green, stored at band 3) of Digital Globe
multispectral imagery, and we will use the N band (near-infrared 1,
stored at band 7), to determine a mask of the ground vs water.
These or any other bands can be extracted from a multi-band image as follows:
::
b=3
gdal_translate -b ${b} -co compress=lzw -co TILED=yes \
-co BLOCKXSIZE=256 -co BLOCKYSIZE=256 \
input.TIF input_b${b}.tif
The extra options, in addition to ``-b ${b}`` to extract a given band,
are needed to create a compressed and tiled output image, which helps
with the performance of ASP later.
.. _bathy_thresh:
Computation of the water-land threshold
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In order to distinguish points on land from those under water, a
mask needs to be computed for each of the left and right input images,
with the masks having the same dimensions as those images.
A simple way of creating such a mask is to first determine a threshold
such that pixels at or below threshold are under water, and those
above threshold are on land.
It was experimentally found that it is best to use band 7 (near
infrared) for Digital Globe multispectral images to find this water
threshold, as in them the water appears universally darker than the
land.
Other methods are available for such thresholding and masking. See
:numref:`bathy_water_masking`.
ASP provides two tools for finding the threshold in an automated way
based on histogram analysis. One is ``bathy_threshold_calc.py``
(:numref:`bathy_threshold_calc`), and the second one is
``otsu_threshold`` (:numref:`otsu_threshold`). This last tool produces
a somewhat higher threshold compared to the other one, but in practice
the results with both approaches are very similar.
The ``bathy_threshold_calc.py`` program works based on the observation
that, since in such an image the water appears darker than the land,
then in a histogram of the pixels in the image, the water and land
appear as two noticeable peaks, with a good value for the threshold
then being the image value at the bottom of the valley between those
peaks.
For robustness to noise, this histogram is approximated by a
kernel-density estimate (``KDE``) using Gaussian kernels. It is very
important to note that even then this tool may return the wrong
minimum, which it assumes to be the first one.
Therefore, this tool plots the histogram, its kernel density estimate,
the positions of the minima, and prints their locations on screen. The
user is responsible for validating visually where the most appropriate
position of the minimum is (along the horizontal axis).
The kernel-density estimate calculation is very time-consuming for
large images, hence it is suggested to pass to the tool the number of
samples to use (it will pick the samples uniformly in the image). For
example, if a million samples are used, the calculation should take
a few minutes to complete.
This program can be invoked for each of the left and right images as follows:
::
~/miniconda3/envs/bathy/bin/python $(which bathy_threshold_calc.py) \
--image left.tif --num-samples 1000000
Here it is assumed that ASP's ``bin`` directory is in the path. The ``bathy``
conda environment should be installed as described in
:numref:`bathy_threshold_calc`.
It is suggested to experiment a bit with the number of samples, using,
for example, double of this amount, and see the difference. Normally
the outcome should be rather similar.
It will produce the following output:
::
Image file is left.tif
Number of samples is 1000000
Number of image rows and columns: 7276, 8820
Picking a uniform sample of dimensions 908, 1101
Please be patient. It may take several minutes to find the answer.
Positions of the minima: [ 155.18918919 802.7027027 ... ]
Suggested threshold is the position of the first minimum: 155.1891891891892
Please verify with the graph. There is a chance the second minimum may work better.
Elapsed time in seconds: 275.2
.. figure:: ../images/examples/bathy/bathy_threshold_calc.png
:name: bathy_water_threshold_example
Example of the graph plotted by bathy_threshold_calc.py
Once the threshold is found, ``stereo_gui`` can be used to visualize
the regions at or below threshold (:numref:`thresh`).
.. _bathy_mask_creation:
Creation of masks based on the threshold
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Having determined the water-land threshold, the left and right image
masks will be found from the corresponding images as follows:
::
left_thresh=155.1891891891892
image_calc -c "gt(var_0, $left_thresh, 1, 0)" -d float32 \
left_b7.tif -o left_mask.tif
Here, ``left_b7.tif`` is suggestive of the fact that the band 7 of
WorldView multispectral imagery was used.
It is important to remember to use the right image threshold when repeating
this process for the right image.
The ``image_calc`` tool (:numref:`image_calc`) produces a binary mask, with 1
for land (values strictly larger than the threshold) and 0 for water (values at
or below the threshold).
If using a spectral index where water has higher values than land
(like NDWI), the polarity is reversed (use the ``lt`` operator instead
of ``gt``). See :numref:`bathy_water_masking` for details.
Later, when doing stereo, if, based on the masks, a pixel in the left
image is under water, while the corresponding pixel in the right image
is not, for noise or other reasons, that pixel pair will be declared
to be on land and hence no bathymetry correction will take place for
this pair. Hence, some inspection and potentially cleanup of the
masks may be necessary.
.. _water_surface:
Determination of the water surface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In order to run stereo and properly triangulate the rays which
may intersect under water, it is necessary to determine
the water surface. Since for images of large extent the Earth
curvature will be important, this surface will be found as a plane
in a local stereographic projection.
The procedure for this is described in :numref:`bathy_plane_calc`.
Stereo with bathymetry correction
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Having these in place, stereo can then happen as follows:
::
parallel_stereo -t dg \
left.tif right.tif \
left.xml right.xml \
--left-bathy-mask left_mask.tif \
--right-bathy-mask right_mask.tif \
--stereo-algorithm asp_mgm \
--refraction-index 1.34 \
--bathy-plane bathy_plane.txt \
run_bathy/run
Here we specified the two masks, the water index of refraction, and the water
plane found before. Pixels classified as water must have non-positive value
or be no-data in the mask, while land pixels must have positive value.
See :numref:`nextsteps` for a discussion about various
speed-vs-quality choices.
This is followed by creating a DEM (:numref:`point2dem`)::
point2dem run_bathy/run-PC.tif --orthoimage run_bathy/run-L.tif
The water refraction index was set to 1.34 :cite:`jerlov1976marine`. Alternatively,
one could use 1.333 :cite:`thormahlen1985refractive,harvey1998calibration`, or a
more precise value that depends on wavelength, temperature, and if having
saltwater or freshwater (`Parrish (2020)
<https://research.engr.oregonstate.edu/parrish/index-refraction-seawater-and-freshwater-function-wavelength-and-temperature>`_,
:cite:`austin1976index,mobley1995optical`). For example, using the equation and
coefficients found in Parrish (2020), and the green wavelength for saltwater,
the water refraction index is 1.340125 when the water temperature is 27 |deg| C
(this was applied to a Florida Keys test site for the month of May).
The refraction index can be computed with the :ref:`refr_index` program.
The obtained point cloud will have both triangulated points above water,
so with no correction, and below water, with the correction applied.
If desired to have only one of the two, call the ``parallel_stereo`` command
with the option ``--output-cloud-type`` with the value ``topo``
or ``bathy`` respectively (the default for this option is ``all``).
The bathymetry correction happens at the triangulation stage
(though the necessary transformations on the bathymetry masks are done
in pre-processing). Hence, after a stereo run finished, it is only
necessary to re-run the ``stereo_tri`` step if desired to apply this
correction or not, or if to change the value of
``--output-cloud-type``.
As in usual invocations of stereo, the input images may be
mapprojected, and then a DEM is expected, stereo may happen only in
certain regions as chosen in the GUI, bundle adjustment may be used,
the output point cloud may be converted to LAS, etc.
.. _bathy_validation:
Performing sanity checks on a bathy run
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The results produced with the bathymetry mode for stereo need careful
validation. Here we will show how to examine if the water-land
boundary and corresponding water surface were found correctly.
Before that, it is important to note that such runs can take a long
time, and one should try to first perform a bathymetry experiment
in a carefully chosen small area by running ``stereo_gui`` instead of
``parallel_stereo``, while keeping the rest of the bathy options as
above, and then selecting clips in the left and right images with the
mouse to run ``parallel_stereo`` on. See :numref:`stereo_gui` for more
info.
If the ``bathy_plane_calc`` is run with the option::
--output-inlier-shapefile inliers.shp
it will produce a shapefile for the inliers.
Create an orthoimage from the aligned bathy mask, for example,
such as::
point2dem --no-dem run_bathy/run-PC.tif \
--orthoimage run_bathy/run-L_aligned_bathy_mask.tif \
-o run_bathy/run-bathy_mask
This should create ``run_bathy/run-bathy_mask-DRG.tif``.
This should be overlaid in ``stereo_gui`` on top of the inliers
from the bathy plane calculation, as::
stereo_gui --single-window --use-georef inliers.shp \
run_bathy/run-bathy_mask-DRG.tif
The inliers should be well-distributed on the land-water interface
as shown by the mask.
To verify that the water surface was found correctly, one can
create a DEM with no bathymetry correction, subtract from that one the
DEM with bathymetry correction, and colorize the result. This can be
done by redoing the triangulation in the previous run, this time with
no bathy information::
mv run_bathy/run-DEM.tif run_bathy/run-yesbathy-DEM.tif
parallel_stereo -t dg left.tif right.tif left.xml right.xml \
--stereo-algorithm asp_mgm \
--entry-point 5 run_bathy/run
point2dem run_bathy/run-PC.tif -o run_bathy/run-nobathy
Note that we started by renaming the bathy DEM. The result of these
commands will be ``run_bathy/run-nobathy-DEM.tif``. The differencing
and colorizing is done as::
geodiff run_bathy/run-nobathy-DEM.tif \
run_bathy/run-yesbathy-DEM.tif -o run_bathy/run
colormap --min 0 --max 1 run_bathy/run-diff.tif
The obtained file, ``run_bathy/run-diff_CMAP.tif``, can be added to
the ``stereo_gui`` command from above. Colors hotter than blue will be
suggestive of how much the depth elevation changed as result of
bathymetry correction. It is hoped that no changes will be seen on
land, and that the inliers bound well the region where change of depth
happened.
.. _bathy_ba_align:
Bundle adjustment and alignment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is important to note that we did not use bundle adjustment
(:numref:`bundle_adjust`) or ``pc_align`` (:numref:`pc_align`) for
alignment. That is possible, but then one has to ensure that all the
data are kept consistent under such operations.
In particular, running bundle adjustment on a PAN image pair, and then
on a corresponding multispectral band pair, will result in DEMs which
are no longer aligned either to each other, or to their versions
before bundle adjustment. The cameras can be prevented from moving too
far if ``bundle_adjust`` is called with, for example, ``--tri-weight
0.1``, or some other comparable value, to constrain the triangulated
points. Yet, by its very nature, this program changes the positions
and orientations of the cameras, and therefore the coordinate
system. And note that a very high camera weight may interfere with the
convergence of bundle adjustment.
It is suggested to use these tools only if a trusted reference dataset
exists, and then the produced DEMs should be aligned to that dataset.
ASP build 2026-01 or newer (:numref:`release`) supports modeling bathymetry
during bundle adjustment (:numref:`ba_bathy`).
Only the "topo" component of a DEM obtained with ASP should be used
for alignment (see ``--output-cloud-type``), that is, the part above
water, as the part under water can be quite variable given the water
level. Here's an example for creating the "topo" DEM, just the
triangulation stage processing needs modification::
parallel_stereo -t dg left.tif right.tif left.xml right.xml \
--stereo-algorithm asp_mgm \
--left-bathy-mask left_mask.tif --right-bathy-mask right_mask.tif \
--refraction-index 1.34 --bathy-plane bathy_plane.txt \
--entry-point 5 --output-cloud-type topo \
run_bathy/run
point2dem run_bathy/run-PC.tif -o run_bathy/run-topo
which will create ``run_bathy/run-topo-DEM.tif``.
Then, after the "topo" DEM is aligned, the alignment transform can be
applied to the full DEM (obtained at triangulation stage with
``--output-cloud-type all``), as detailed in :numref:`prevtrans`. The
input cameras can be aligned using the same transform
(:numref:`ba_pc_align`).
When the water surface is determined using a DEM, a mask of the image
portion above water, and corresponding camera, and the cameras have
been bundle-adjusted or aligned, the option ``--bundle-adjust-prefix``
must be used with ``bathy_plane_calc`` (see
:numref:`bathy_plane_raw_img`).
Validation of alignment
~~~~~~~~~~~~~~~~~~~~~~~
It is very strongly suggested to use visual inspection in
``stereo_gui`` and the ``geodiff`` and ``colormap`` tools for
differencing DEMs to ensure DEMs that are meant to be aligned have
small differences. Since bathymetry modeling can measure only very
shallow water depths, any misalignment can result in big errors in
final results.
If DEMs have parts under water and it is desired to remove those for
the purpose of alignment, one can take advantage of the fact that the
water height is roughly horizontal. Hence, a command like::
height=-21.2
image_calc -c "max($height, var_0)" -d float32 \
--output-nodata-value $height \
dem.tif -o topo_dem.tif
will eliminate all heights under -21.2 meters (relative to the
datum ellipsoid).
Bathymetry with changing water level
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If the left and right images were acquired at different times,
the water level may be different among the two, for example because
of the tide. Then, ``bathy_plane_calc`` (:numref:`bathy_plane_calc`)
can be used independently for the left and right images, obtaining
two such surfaces. These can be passed to ASP as follows::
parallel_stereo --bathy-plane "left_plane.txt right_plane.txt" \
--stereo-algorithm asp_mgm \
<other options as before>
The computation will go as before until the triangulation stage.
There, the rays emanating from the cameras will bend when meeting the
water at different elevations given by these planes, and their
intersection may happen in three possible regimes (above both planes,
in between them, or below both of them).
Care must be taken when doing stereo with images acquired at different
times as the illumination may be too different. A good convergence
angle is also expected (:numref:`stereo_pairs`).
.. _bathy_reuse_run:
How to reuse most of a run
~~~~~~~~~~~~~~~~~~~~~~~~~~
Stereo can take a long time, and the results can have a large size on
disk. It is possible to reuse most of such a run, using the option
``--prev-run-prefix``, if cameras or camera adjustments (option
``--bundle-adjust-prefix``) get added, removed, or change, if the
water surface (``--bathy-plane``) or index of refraction change, or if
the previous run did not do bathy modeling but the new run does (hence
the options ``--left-bathy-mask`` and ``--right-bathy-mask`` got
added).
One must not change ``--left-image-crop-win`` and
``--right-image-crop-win`` in the meantime, if used, as that may
invalidate the intermediate files we want to reuse, nor the input
images. If the previous run did employ bathy masks, and it is desired
to change them (rather than add them while they were not there
before), run the ``touch`` command on the new bathy masks, to give
the software a hint that the alignment of such masks should be redone.
As an example, consider a run with no bathymetry modeling::
parallel_stereo -t dg left.tif right.tif left.xml right.xml \
--stereo-algorithm asp_mgm \
run_nobathy/run
A second run, with output prefix ``run_bathy/run``, can be started
directly at the triangulation stage while reusing the earlier stages
from the other run as::
parallel_stereo -t dg left.tif right.tif left.xml right.xml \
--stereo-algorithm asp_mgm \
--left-bathy-mask left_mask.tif --right-bathy-mask right_mask.tif \
--refraction-index 1.34 --bathy-plane bathy_plane.txt \
--bundle-adjust-prefix ba/run run_yesbathy/run \
--prev-run-prefix run_nobathy/run
The explanation behind the shortcut employed above is that the precise
cameras and the bathy info are fully used only at the triangulation
stage. That is because the preprocessing step (step 0) mostly does
alignment, for which some general knowledge of the cameras and bathy
information is sufficient, and other steps, before triangulation, work
primarily on images. This option works by making symbolic links
to files created at previous stages of stereo which are needed at
triangulation.
Note that if the cameras changed, the user must recompute the bathy
planes first, using the updated cameras. The ``bathy_plane_calc`` tool
which is used for that can take into account the updated cameras via
the ``--bundle-adjust-prefix`` option passed to it.
If the software notices that the current run invoked with ``--prev-run-prefix``
employs bathy masks, unlike that previous run, or that the modification time of
the bathy masks passed in is newer than of files in that run, it will ingest and
align the new masks before performing triangulation.
If the cameras change notably, it may be suggested to redo the run from scratch.
.. _bathy_map:
Bathymetry correction with mapprojected images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Mapprojecting the input images can improve the results on steep slopes
(:numref:`mapproj-example`). While that may not be a big concern in
bathymetry applications, this section nevertheless illustrates how
stereo with shallow water can be done with mapprojection.
Given an external DEM, the left and right images can be mapprojected
onto this DEM, for example as:
::
mapproject external_dem.tif --tr gridSize \
left.tif left.xml left_map.tif
and the same for the right image. The same ground sample distance
(resolution) must be used for left and right images, which should be
appropriately chosen depending on the data (:numref:`mapproj-res`).
One should mapproject the same way the left and right band 7 Digital
Globe multispectral images (if applicable), obtaining two images,
``left_map_b7.tif`` and ``right_map_b7.tif``. These two can be used to
find the masks, as earlier:
::
left_thresh=155.1891891891892
image_calc -c "max($left_thresh, var_0)" \
--output-nodata-value $left_thresh \
left_map_b7.tif -o left_map_mask.tif
(and the same for the right image.)
The threshold determined with the original non-mapprojected images
should still work, and the same water plane can be used.
Then, stereo happens as above, with the only differences being the
addition of the external DEM and the new names for the images and the
masks:
::
parallel_stereo -t dg left_map.tif right_map.tif \
left.xml right.xml \
--stereo-algorithm asp_mgm \
--left-bathy-mask left_map_mask.tif \
--right-bathy-mask right_map_mask.tif \
--refraction-index 1.34 \
--bathy-plane bathy_plane.txt \
run_map/run external_dem.tif
Using Digital Globe PAN images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The bathymetry mode can be used with Digital Globe PAN images as well,
though likely the water bottom may not be as transparent in this case
as for the Green band.
Yet, if desired to do so, a modification is necessary if the mask
for pixels above water is obtained not from the PAN image itself,
but from a band of the corresponding multispectral image,
because those are acquired with different sensors.
Starting with a multispectral image mask, one has to first increase
its resolution by a factor of 4 to make it comparable to the PAN
image, then crop about 50 columns on the left, and further crop or
extend the scaled mask to match the PAN image dimensions.
ASP provides a tool for doing this, which can be called as::
scale_bathy_mask.py ms_mask.tif pan_image.tif output_pan_mask.tif
Any warnings about ``srcwin ... falls partially outside raster
extent`` should be ignored. GDAL will correctly pad the scaled mask
with no-data values if it has to grow it to match the PAN image.
To verify that the PAN image and obtained scaled PAN mask agree,
overlay them in ``stereo_gui``, by choosing from the top menu the
option ``View->Single window``.
It is not clear if the number of columns to remove on the left should
be 50 or 48 pixels. It appears that 50 pixels works better for WV03
while 48 pixels may be appropriate for WV02. These were observed
to result in a smaller shift among these images. The default is 50.
If desired to experiment with another amount, pass that one
as an additional argument to the tool, after the output PAN mask.
.. _bathy_non_dg:
Using non-Digital Globe images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Stereo with bathymetry was tested with RPC cameras. In fact, the
above examples can be re-run by just replacing ``dg`` with ``rpc`` for
the ``-t`` option. (It is suggested that the shoreline shapefile and
the water plane be redone for the RPC case. It is expected that the
results will change to a certain extent.)
Experiments were also done with pinhole cameras (using the
``nadirpinhole`` session) with both raw and mapprojected images, and
using the alignment methods 'epipolar', 'affineepipolar',
'homography', and 'none', giving plausible results.
Effect of bathymetry correction on the output DEM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is instructive to compare the DEMs with and without the bathymetry
correction.
The bathymetry correction results in the points in the output
triangulated cloud being pushed "down", as the rays emanating from the
cameras become "steeper" after meeting the water.
Yet, a DEM is obtained by binning and doing weighted averaging of the
points in the cloud. It can happen that with the bathymetry
correction on, a point may end up in a different bin than with it off,
with the result being that a handful of heights in the
bathymetry-corrected DEM can be slightly above the same heights in the
DEM without the correction, which is counter-intuitive.
This however will happen only close to the water-land interface and is
an expected gridding artifact. (A different DEM grid size may result
in the artifacts changing location and magnitude.)
================================================
FILE: docs/examples/cassini.rst
================================================
.. _cassini_example:
Cassini ISS NAC
---------------
This is a proof of concept showing the strength of building the Stereo
Pipeline on top of ISIS. Support for processing ISS NAC stereo pairs was
not a goal during our design of the software, but the fact that a camera
model exists in ISIS means that it too can be processed by the Stereo
Pipeline.
Identifying stereo pairs from spacecraft that do not orbit their target
is a challenge. We have found that one usually has to settle with images
that are not ideal: different lighting, little perspective change, and
little or no stereo parallax. So far we have had little success with
Cassini's data, but nonetheless we provide this example as a potential
starting point.
Rhea
~~~~
Rhea is the second largest moon of Saturn and is roughly a third the
size of our own Moon. This example shows, at the top right of both
images, a giant impact basin named Tirawa that is 220 miles across. The
bright white area south of Tirawa is ejecta from a new crater. The lack
of texture in this area poses a challenge for our correlator. The
results are just barely useful: the Tirawa impact can barely be made out
in the 3D data while the new crater and ejecta become only noise.
.. figure:: ../images/examples/cassini/cassini_rhea_quad.png
:name: cassini-example
Example output of what is possible with Cassini's ISS NAC. Upper left:
original left image. Upper right: original right image. Lower left:
mapprojected left image. Lower right: 3D Rendering of the point cloud.
Commands
^^^^^^^^
Download the N1511700120_1.IMG and W1567133629_1.IMG images and their
label (.LBL) files from the PDS.
::
ISIS> ciss2isis f=N1511700120_1.LBL t=N1511700120_1.cub
ISIS> ciss2isis f=W1567133629_1.LBL t=W1567133629_1.cub
ISIS> cisscal from=N1511700120_1.cub to=N1511700120_1.lev1.cub
ISIS> cisscal from=W1567133629_1.cub to=W1567133629_1.lev1.cub
ISIS> fillgap from=W1567133629_1.lev1.cub to=W1567133629_1.fill.cub
(Note the optional ``fillgap`` command above.)
::
ISIS> cubenorm from=N1511700120_1.lev1.cub to=N1511700120_1.norm.cub
ISIS> cubenorm from=W1567133629_1.fill.cub to=W1567133629_1.norm.cub
ISIS> spiceinit from=N1511700120_1.norm.cub
ISIS> spiceinit from=W1567133629_1.norm.cub
ISIS> cam2map from=N1511700120_1.norm.cub to=N1511700120_1.map.cub
ISIS> cam2map from=W1567133629_1.norm.cub map=N1511700120_1.map.cub \
ISIS> to=W1567133629_1.map.cub matchmap=true
ISIS> parallel_stereo N1511700120_1.map.cub \
W1567133629_1.map.cub result/rhea
See :numref:`nextsteps` for a discussion about various speed-vs-quality choices.
Also consider the following modifications to ``stereo.default``::
### PREPROCESSING
alignment-method none
force-use-entire-range
individually-normalize
### CORRELATION
prefilter-mode 2
prefilter-kernel-width 1.5
cost-mode 2
corr-kernel 25 25
corr-search -55 -2 -5 10
subpixel-mode 3
subpixel-kernel 21 21
### FILTERING
rm-half-kernel 5 5
rm-min-matches 60 # Units = percent
rm-threshold 3
rm-cleanup-passes 1
================================================
FILE: docs/examples/chandrayaan2.rst
================================================
.. _chandrayaan2:
Chandrayaan-2 lunar orbiter
---------------------------
The example here shows how to create a 3D terrain model with `Chandrayaan-2 lunar
orbiter `_ data. We will work with
the *Orbiter High Resolution Camera* (OHRC). A *Terrain Mapping Camera-2* (TMC-2)
example will be added at a later time.
For the moment this exercise works only with much additional work. It needs ASP
3.6.0 (:numref:`release`), `ISIS <https://github.com/DOI-USGS/ISIS3>`_ 9.0.0,
`ALE <https://github.com/DOI-USGS/ale>`_ (compiled and installed
from source to a separate location), SPICE kernels from the `ISRO Science Data
Archive `_, and
custom addendum (``iak``) directories for ISIS data be set up.
This is *not ready for general use* until the kernels are released in the ISIS
data area, but is provided for reference.
Orbiter High Resolution Camera
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The OHRC instrument is a high-resolution camera with a 0.25 m ground sample
distance (GSD). It can adjust its look angle and acquire stereo pairs
(:numref:`stereo_pairs`).
Fetching the data
^^^^^^^^^^^^^^^^^
Raw and calibrated images for OHRC and TMC-2 cameras, as well as orthoimages and
Digital Elevation Models (DEMs) produced from TMC-2 camera data, can be
downloaded from `ISRO `_.
The first step when using that portal is selecting the appropriate projection
for displaying the image footprints. Then, choose the instrument (OHRC or
TMC-2), data type (calibrated is suggested, but raw may do), and the area of
interest.
We selected the region of interest to be between 20 and 21 degrees in longitude,
and -70 to -67 degrees in latitude. The OHRC stereo pair we downloaded consisted
of images with the prefixes::
ch2_ohr_nrp_20200827T0030107497_d_img_d18
ch2_ohr_nrp_20200827T0226453039_d_img_d18
We also got a TMC-2 orthoimage and corresponding DEM with the prefixes::
ch2_tmc_ndn_20231101T0125121377_d_oth_d18
ch2_tmc_ndn_20231101T0125121377_d_dtm_d18
These are at lower resolution but useful for context.
.. figure:: ../images/chandrayaan2_ohrc_tmc.png
From left to right: The first and second OHRC images, and their approximate
extent in the (many times larger) TMC-2 ortho image. Note that the illumination
in the TMC-2 ortho image is very different.
Preprocessing
^^^^^^^^^^^^^
Each calibrated image dataset has ``.img`` and ``.xml`` files, with raw data and
a PDS-4 label. It will be convenient to rename these to ``ohrc/img1.img`` and
``ohrc/img1.xml`` for the first OHRC dataset, and analogously for the second
one.
The `isisimport `_ command converts the raw image to a .cub file::
isisimport \
from = ohrc/img1.xml \
to = ohrc/img1.cub \
template = ${template}
(and same for the second image). Here, the ``template`` variable is set such
as::
template=/path/to/ISIS3/isis/appdata/import/PDS4/Chandrayaan2OHRC.tpl
In ISIS 9.0.0 likely the template parameter is optional and the template should
be auto-detected.
The ``isisimport`` command only works with raw images and not with ortho images.
If this command fails with a message about not being able to find a field in the
input xml file, it is suggested to edit that file and add a made-up entry for
that field. This is a temporary workaround for the problem of Chandrayaan-2 xml
files being rather diverse in what fields they record.
The SPICE kernels are attached with `spiceinit `_::
spiceinit from = ohrc/img1.cub
This expects the SPICE kernels for Chandrayaan-2 to exist locally (see the download link
above). For more information on ISIS data, see :numref:`planetary_images` and the
links from there.
Next, the CSM cameras are created (:numref:`csm`). This makes use of the `isd_generate `_ program installed with the latest ALE built from source (link above). The command is::
export ALESPICEROOT=$ISISDATA
isd_generate -k ohrc/img1.cub ohrc/img1.cub
and same for ``img2.cub``. Here the .cub file is specified twice, with the
first file needed to read the SPICE kernels.
It is suggested to do a quick check on the produced ``ohrc/img1.json`` camera
with ``cam_test`` (:numref:`cam_test`).
The images can be inspected with ``stereo_gui`` (:numref:`stereo_gui`), as::
stereo_gui ohrc/img1.cub ohrc/img2.cub
The resulting cub files are very large, on the order of 12,000 x 101,075 pixels.
For exploratory work, these can be cropped, with the ISIS `crop
`_
command, such as::
crop \
from = ohrc/img1.cub \
to = ohrc/img1_crop.cub \
sample = 1 \
line = 1 \
nsamples = 12000 \
nlines = 50000
It is very important to ensure that the upper-left pixel (1, 1) is part of the
crop region, as otherwise the resulting images will be inconsistent with the CSM
camera models.
Bundle adjustment
^^^^^^^^^^^^^^^^^
We found that these images have notable pointing error, so bundle adjustment
(:numref:`bundle_adjust`) is needed::
bundle_adjust \
ohrc/img1_crop.cub ohrc/img2_crop.cub \
ohrc/img1.json ohrc/img2.json \
--ip-per-image 30000 \
-o ba/run
This stereo pair was seen to have a decent convergence angle of 25 degrees
(:numref:`ba_conv_angle`).
.. figure:: ../images/chandrayaan2_ohrc_interest_points.png
The left and right cropped OHRC images, and the interest point matches between
them (as shown by ``stereo_gui``, :numref:`stereo_gui_view_ip`).
Stereo
^^^^^^
Next, we invoked ``parallel_stereo`` (:numref:`parallel_stereo`) to create a point cloud::
parallel_stereo \
--stereo-algorithm asp_mgm \
--clean-match-files-prefix ba/run \
ohrc/img1_crop.cub \
ohrc/img2_crop.cub \
ba/run-img1.adjusted_state.json \
ba/run-img2.adjusted_state.json \
stereo/run
A DEM, orthoimage, and triangulation error image are made with ``point2dem``
(:numref:`point2dem`), as::
point2dem \
--tr 1.0 \
--errorimage \
stereo/run-PC.tif \
--orthoimage \
stereo/run-L.tif
In a recent version of ASP these will, by default, have a local stereographic
projection.
.. figure:: ../images/chandrayaan2_ohrc_dem_ortho_err.png
From left to right: Produced OHRC DEM (range of heights is 304 to 650 meters),
orthoimage, and triangulation error image (blue = 0 m, red = 0.5 m). There is
notable jitter, whose magnitude is on the order of image GSD (0.25 m), which
is rather high, but which could be corrected (:numref:`jitter_solve`). Some
unmodeled lens distortion also seems evident, which could be solved for
(:numref:`kaguya_ba`).
Alignment
^^^^^^^^^
We will align the produced OHRC DEM to LOLA, which is the usual global
reference coordinate system for the Moon.
The OHRC DEM turned out to be shifted relative to LOLA by about 4 km along the
satellite track, which resulted in failure to align with ``pc_align``
(:numref:`pc_align`).
Manual alignment was first performed (:numref:`manual-align`). The inputs were
the OHRC DEM and a LOLA point cloud, after gridding both with a 10 m grid size
and the same projection with ``point2dem``, and manually picking a few
visually similar features. That brought the cloud notably closer, and the output
transform from that alignment was used for aligning the full clouds as::
pc_align \
--max-displacement 250 \
--initial-transform init-transform.txt \
--csv-format 2:lon,3:lat,4:radius_km \
--save-inv-transformed-reference-points \
stereo/run-DEM.tif lola/lola.csv \
-o stereo/run-align
.. figure:: ../images/chandrayaan2_ohrc_lola.png
The difference between the aligned OHRC DEM and the LOLA point cloud. Blue = -5 m,
red = 5 m. Given that the DEM, in principle, should have a vertical
uncertainty of under 1 m, this could be better, but at least one is in the
ballpark.
A terrain model created with the lower-resolution TMC-2 images would likely be
easier to align to LOLA, as it would have a much bigger extent.
================================================
FILE: docs/examples/change3.rst
================================================
.. _change3:
Chang'e 3 landing camera
------------------------
This example discusses processing Chang'e 3 landing camera images. This camera
was mounted at the bottom of the lander and acquired images during the descent
phase.
The images we inspected had a very small convergence angle
(:numref:`stereo_pairs`), of under 1.5 degrees, which resulted in an unreliable
terrain model.
Here we show how these images can be precisely registered to an LRO NAC image
(:numref:`lronac-example`), how to refine the landing camera intrinsics
including lens distortion, and how to produce an aligned terrain model from a
stereo pair between a Chang'e landing camera image and an LRO NAC image
with similar illumination.
Fetching the Chang'e 3 images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Chang'e 3 landing video is
very helpful when it comes to deciding which images to process.
The images are available from China's `Lunar Planetary Data Release System
`_. Select the
``CE3`` mission, the ``LCAM`` instrument, and the ``2A`` data level. The images
have names of the form::
CE3_BMYK_LCAM-1801_SCI_N_20131214130807_20131214130807_0001_A.2A
The data is in the PDS3 format, with a plain text metadata header followed by a binary image.
The images can be converted to TIF with ``gdal_translate`` (:numref:`gdal_tools`)
as::
mkdir -p img
gdal_translate CE3_BMYK_LCAM-1801*.2A img/1801.tif
Here, ``1801`` is the index in the sequence. We stored the result in the ``img``
directory.
.. figure:: ../images/change3_images.png
Landing camera images 1801, 1831, 1861, and 1891. A crater seen in all four images
is highlighted with a red box.
Some parts of the lander body are seen in the foreground. Most of those
artifacts can be masked with ``image_calc`` (:numref:`image_calc`) with a
command as::
image_calc -c "max(var_0,70)" \
--output-nodata-value 70 \
-d float32 \
img/1801.tif \
-o img/1801_mask.tif
This sets to no-data any pixel values not exceeding 70.
A more careful processing could be done by opening an image in an image editor,
manually setting to black (zero pixel value) all undesirable pixels, and then
using the ``image_calc`` ``sign()`` function to create a mask of invalid (value
0) and valid (value 1) pixels. Those could be applied to each image by
multiplication, with ``image_calc`` with the option ``--output-nodata-value 0``.
The same mask would work for all of them.
LRO NAC data
~~~~~~~~~~~~
The Chang'e 3 images will be registered against LRO NAC images. These are larger,
with known camera information, and at higher resolution.
It was quite tricky to find an LRO NAC image with similar illumination. This
required mapprojecting many such images and visual inspection. We settled on image
``M1154358210RE``.
How to download and prepare LRO NAC images, including the application of ``lronaccal``
and ``lronacecho``, is described in :numref:`lronac-example`. A CSM camera model
can be produced as in :numref:`create_csm_linescan`. The resulting datasets will
be called ``img/lro.cub`` and ``img/lro.json``.
We will also fetch an `LRO NAC DEM
`_
produced specifically for this landing site. We call it ``ref/ref.tif``.
The LRO NAC images are very large, and sometimes also scanned in reverse
direction, appearing mirror-flipped. This can result in failure in finding
matching features for registration. To make the work easier, we will mapproject
the needed image portion onto this DEM.
Since these two datasets are not explicitly co-registered, we will blur the DEM
for mapprojection quite a bit to lessen the effect of artifacts due to
misregistration. Later, for alignment to the ground, we will use the original
DEM.
The blur is done with ``dem_mosaic`` (:numref:`dem_mosaic`) as::
dem_mosaic --dem-blur-sigma 10 \
ref/ref.tif -o ref/ref_blur.tif
Define the extent on the ground and the projection::
win="3497495 1340957 3503625 1334379"
proj="+proj=eqc +lat_ts=44 +lat_0=0 +lon_0=180 +x_0=0 +y_0=0 +R=1737400 +units=m +no_defs"
Then, the mapprojection (:numref:`mapproject`) step follows::
mapproject --tr 2.0 \
--t_projwin $win \
--t_srs "$proj" \
ref/ref_blur.tif \
img/lro.cub \
img/lro.json \
img/lro.map.tif
The grid size of 2 meters was chosen to be similar to the resolution of the
Chang'e 3 images.
GCP creation
~~~~~~~~~~~~
We will find interest point matches between the Chang'e 3 and LRO NAC images,
based on which we will compute GCP (:numref:`bagcp`), that will be later used to
infer an approximate position and orientation of the Chang'e 3 camera at the
time of image acquisition.
GCP are found with the ``gcp_gen`` program (:numref:`gcp_gen`) as::
gcp_gen \
--ip-detect-method 1 \
--inlier-threshold 100 \
--ip-per-tile 20000 \
--gcp-sigma 100 \
--individually-normalize \
--camera-image img/1801_mask.tif \
--ortho-image img/lro.map.tif \
--dem ref/ref.tif \
--output-prefix run/run \
-o gcp/gcp_1801.gcp
The interest point matches can be visualized with ``stereo_gui``
(:numref:`stereo_gui_view_ip`) as::
stereo_gui img/1801_mask.tif img/lro.map.tif \
run/run-1801__lro.map.match
.. figure:: ../images/change3_lro.png
Interest point matches between masked Chang'e image 1801 and mapprojected LRO
NAC image M1154358210RE. Similar results are obtained for the other images.
Initial camera models
~~~~~~~~~~~~~~~~~~~~~
The Chang'e 3 landing camera is a frame camera. The input .2A datasets mention
that it has a focal length of 8.3 mm and a pixel size of 6.7 micrometers, which
makes the focal length about 1238.805 pixels.
The image dimensions are 1024 x 1024 pixels. It can be assumed that the optical
center is at the center of the image, so its coordinates are (511.5, 511.5).
The lens distortion model is not known. We will assume the standard radial-tangential
distortion model, and will initialize all distortion coefficients with small values,
such as 1e-7, that will be optimized later.
This allows us to build a Pinhole model (:numref:`pinholemodels`) with nominal
camera position and orientation. We will save it to a file called ``sample.tsai``,
with the following content::
VERSION_4
PINHOLE
fu = 1238.805
fv = 1238.805
cu = 511.5
cv = 511.5
u_direction = 1 0 0
v_direction = 0 1 0
w_direction = 0 0 1
C = 0 0 0
R = 1 0 0 0 1 0 0 0 1
pitch = 1
Tsai
k1 = 1e-7
k2 = 1e-7
p1 = 1e-7
p2 = 1e-7
k3 = 1e-7
We will make use of the GCP found earlier to infer the camera position and orientation.
This is done with ``bundle_adjust`` (:numref:`bundle_adjust`) as::
bundle_adjust \
img/1801_mask.tif \
sample.tsai \
gcp/gcp_1801.gcp \
--datum D_MOON \
--inline-adjustments \
--init-camera-using-gcp \
--camera-weight 0 \
--num-iterations 100 \
-o ba/run
cp ba/run-sample.tsai img/1801.tsai
The camera model was copied to ``img/1801.tsai``.
We will convert this Pinhole model right away to a CSM model (:numref:`csm`), to
be in the same format as the LRO data. This is done with ``cam_gen``
(:numref:`cam_gen`)::
cam_gen \
--datum D_MOON \
img/1801_mask.tif \
--input-camera img/1801.tsai \
-o img/1801.json
The camera model can be validated by mapprojection onto the prior DEM::
mapproject --tr 2.0 \
--t_srs "$proj" \
ref/ref_blur.tif \
img/1801_mask.tif \
img/1801.json \
img/1801.map.tif
The value of ``$proj`` is the same as before.
The resulting mapprojected image can be overlaid on top of the LRO NAC
mapprojected image. Some misalignment is expected at this stage.
More validation strategies are discussed in :numref:`cam_gcp_validation`.
.. figure:: ../images/change3_over_lro.png
Mapprojected and masked Chang'e 3 image 1801 overlaid on top of the LRO NAC
mapprojected image. The masked pixels are shown as transparent. A careful
inspection shows good initial agreement, but some local deformation is seen,
which is likely due to some tilt and lens distortion not being modeled yet.
This will be fixed later.
.. _change_opt:
Optimization of intrinsics
~~~~~~~~~~~~~~~~~~~~~~~~~~
We will optimize the intrinsics and extrinsics of the Chang'e 3 cameras,
including the lens distortion, with the LRO data serving as a constraint.
The general approach from :numref:`ba_frame_linescan` is followed, while
dense matches from disparity are employed, to ensure the best results.
Stereo will be run between any pair of images: ``1801``, ``1831``, ``lro``, and
dense matches from stereo correlation (disparity) will be produced
(:numref:`dense_ip`).
::
i=1801; j=1831
parallel_stereo \
img/${i}.map.tif img/${j}.map.tif \
img/${i}.json img/${j}.json \
--stereo-algorithm asp_mgm \
--num-matches-from-disparity 10000 \
stereo_map_${i}_${j}/run \
ref/ref_blur.tif
This is repeated for ``i=1801; j=lro``, and ``i=1831; j=lro``.
The dense match files are copied to the same location::
mkdir -p dense_matches
cp stereo_map*/run-disp*match dense_matches
Separate lists are made of Chang'e 3 and LRO images and cameras::
ls img/{1801,1831}_mask.tif > change3_images.txt
ls img/lro.cub > lro_images.txt
ls img/{1801,1831}.json > change3_cameras.txt
ls img/lro.json > lro_cameras.txt
Bundle adjustment is run::
bundle_adjust \
--image-list change3_images.txt,lro_images.txt \
--camera-list change3_cameras.txt,lro_cameras.txt \
--solve-intrinsics \
--intrinsics-to-float \
'1:focal_length,optical_center,other_intrinsics 2:none' \
--heights-from-dem ref/ref_blur.tif \
--heights-from-dem-uncertainty 100 \
--match-files-prefix dense_matches/run-disp \
--max-pairwise-matches 50000 \
--num-iterations 50 \
-o ba_dense/run
The value of ``--heights-from-dem-uncertainty`` is set to 100 meters, as
we know that the input cameras are not yet aligned to the input DEM,
so this accounts for the misregistration. This option would fail
for very large misregistration, when a preliminary alignment
would be needed.
Stereo is run between images ``1801`` and ``lro`` with the optimized
cameras and reusing the previous run from above::
parallel_stereo \
img/1801.map.tif img/lro.map.tif \
ba_dense/run-1801.adjusted_state.json \
ba_dense/run-lro.adjusted_state.json \
--stereo-algorithm asp_mgm \
--prev-run-prefix stereo_map_1801_lro/run \
stereo_map_opt_1801_lro/run \
ref/ref_blur.tif
These two images have a convergence angle of 45 degrees, which is very good
for stereo (:numref:`stereo_pairs`).
The Chang'e 3 images are not going to produce a good DEM between themselves,
because of the very small convergence angle, as mentioned earlier.
A DEM is created, at 4 meters per pixel, with ``point2dem`` (:numref:`point2dem`)::
point2dem --tr 4.0 \
--errorimage \
stereo_map_opt_1801_lro/run-PC.tif
It is good to inspect the resulting triangulation error image to ensure lens
distortion was solved for and no systematic errors are present
(:numref:`point2dem_ortho_err`).
The produced DEM can be aligned to the original DEM with ``pc_align``
(:numref:`pc_align`), and the aligned cloud can be made back into a DEM::
pc_align --max-displacement 100 \
--save-inv-transformed-reference-points \
--alignment-method point-to-plane \
stereo_map_opt_1801_lro/run-DEM.tif \
ref/ref.tif \
-o align/run
point2dem --tr 4.0 \
align/run-trans_reference.tif
The resulting alignment transform can be applied to the optimized cameras in the
``ba_dense`` directory (:numref:`ba_pc_align`). After mapprojection with the
optimized and aligned cameras onto ``ref/ref.tif``, no distortion or
misalignment is seen.
.. figure:: ../images/change3_lro_dem.png
Left: The produced aligned DEM with frame 1801. Right: the original LRO NAC DEM.
The Chang'e 3 images are at a lower resolution, and somewhat differ in
illumination from the LRO NAC image, so the quality of the resulting DEM is
lower. However, the larger features are captured correctly, and the alignment is
also very good.
Multi-image registration
~~~~~~~~~~~~~~~~~~~~~~~~
The approach for registering a longer sequence of Chang'e 3 images to each other and to
LRO NAC is very similar.
GCP are computed automatically for each image. Pairwise dense matches are found
between each image and the next, and between each image and the LRO NAC image.
Bundle adjustment can be run as above, while optimizing the intrinsics.
Stereo is run between each Chang'e 3 image and the LRO NAC image, with the optimized
cameras. The resulting DEMs can be merged with ``dem_mosaic``, and the produced mosaic
is aligned to the original LRO NAC DEM with ``pc_align``.
The alignment transform is applied to the optimized cameras
(:numref:`ba_pc_align`). The images with the resulting cameras are mapprojected
onto the original LRO NAC DEM. If needed, the bundle adjustment from above can
be rerun with the now well-aligned cameras and a lower
``--heights-from-dem-uncertainty``.
For a very long sequence of images this method can become impractical. In that
case, the intrinsics that are optimized as demonstrated earlier for a short
stretch can be used with Structure-from-Motion (:numref:`sfm`) on the full
sequence. Just a few well-distributed GCP may be needed to transform the cameras
to ground coordinates. DEM creation and alignment refinement can be as earlier.
If the intrinsics are not optimized, then dense matches are not required, and
the sparse matches produced by ``camera_solve`` in SfM or by ``bundle_adjust``
should be enough.
.. figure:: ../images/change3_many_over_lro.png
From top to bottom, the mapprojected Chang'e images 1780, 1801, 1831, 1861,
1891, and 1910, with the LRO NAC image in the background. These have been
pixel-level registered to each other, to the LRO NAC image, and to the LRO NAC
DEM. The footprint of the images is decreasing along the sequence, and the
resolution is increasing, as the lander is descending. A portion of the data
was cropped on the right to remove the noise due to the lander body and to make
it easier to evaluate the registration visually.
================================================
FILE: docs/examples/csm.rst
================================================
.. _csm:
Community Sensor Model
----------------------
The Community Sensor Model (CSM), established by the U.S. defense
and intelligence community, has the goal of standardizing camera
models for various remote sensor types :cite:`CSMTRD`. It provides
a well-defined application program interface (API) for multiple
types of sensors and has been widely adopted by Earth remote sensing
software systems :cite:`hare2017community,2019EA000713`.
ASP supports and ships the `USGS implementation
`_ of CSM for planetary images,
which provides Linescan, Frame, Pushframe, and Synthetic Aperture Radar (SAR)
implementations.
CSM is handled via dynamically loaded plugins. Hence, if a user has a
new sensor model, ASP should, in principle, be able to use it as soon
as a supporting plugin is added to the existing software, without
having to rebuild ASP or modify it otherwise. In practice, while this
logic is implemented, ASP defaults to using only the USGS
implementation, though only minor changes are needed to support
additional plugins.
Each stereo pair to be processed by ASP should be made up of two
images (for example ``.cub`` or ``.tif`` files) and two plain
text camera files with ``.json`` extension. The CSM information is
contained in the ``.json`` files and it determines which plugin to
load to use with those cameras.
CSM model state data can also be embedded in ISIS .cub files
(:numref:`embedded_csm`).
.. _csm_frame:
The USGS CSM Frame sensor
~~~~~~~~~~~~~~~~~~~~~~~~~
The USGS CSM *Frame* sensor models a frame camera. All the
pixels get acquired at the same time, unlike for pushbroom and
pushframe cameras, which keep on acquiring image lines as they fly
(those are considered later in the text). Hence, a single camera
center and orientation is present. This model serves the same function
as ASP's own Pinhole camera model (:numref:`pinholemodels`).
:numref:`csm_frame_def` discusses the CSM Frame sensor in some detail,
including the distortion model.
In this example we will consider images acquired with the Dawn
Framing Camera instrument, which took pictures of the Ceres and Vesta
asteroids. This particular example will be for Vesta. Note that one
more example of this sensor is shown in this documentation, in
:numref:`dawn_isis`, which uses ISIS ``.cub`` camera models rather
than CSM ones.
This example is available for download.
Creating the input images
^^^^^^^^^^^^^^^^^^^^^^^^^
Fetch the data from PDS then extract it::
wget https://sbib.psi.edu/data/PDS-Vesta/Survey/img-1B/FC21B0004011_11224024300F1E.IMG.gz
wget https://sbib.psi.edu/data/PDS-Vesta/Survey/img-1B/FC21B0004012_11224030401F1E.IMG.gz
gunzip FC21B0004011_11224024300F1E.IMG.gz
gunzip FC21B0004012_11224030401F1E.IMG.gz
For simplicity of notation, we will rename these to ``left.IMG`` and ``right.IMG``.
Set up the ISIS environment (:numref:`planetary_images`).
These will need adjusting for your system::
export ISISROOT=$HOME/miniconda3/envs/isis
export PATH=$ISISROOT/bin:$PATH
export ISISDATA=$HOME/isisdata
Create cub files and initialize the kernels::
dawnfc2isis from = left.IMG to = left.cub target = VESTA
dawnfc2isis from = right.IMG to = right.cub target = VESTA
spiceinit from = left.cub
spiceinit from = right.cub
The ``target`` field is likely no longer needed in newer versions of
ISIS.
.. _create_csm_dawn:
Creation of CSM Frame camera files
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Set::
export ALESPICEROOT=$ISISDATA
Run the ``isd_generate`` command to create the CSM camera files. This script
is part of the ALE package, and should be shipped with the latest ISIS
or ASP, if installed with conda. It can also be installed separately.
::
isd_generate -k left.cub left.cub
isd_generate -k right.cub right.cub
This will create ``left.json`` and ``right.json``.
See the ``isd_generate`` manual for more details.
As a sanity check, run ``cam_test`` (:numref:`cam_test`) to see how well the CSM
camera approximates the ISIS camera::
cam_test --image left.cub --cam1 left.cub --cam2 left.json
cam_test --image right.cub --cam1 right.cub --cam2 right.json
Note that for a handful of pixels these errors may be big. That is a
known issue, and it seems to be due to the fact that a ray traced from
the camera center towards the ground may miss the body of the asteroid.
That should not result in inaccurate stereo results.
Running stereo
^^^^^^^^^^^^^^
::
parallel_stereo --stereo-algorithm asp_mgm \
--left-image-crop-win 243 161 707 825 \
--right-image-crop-win 314 109 663 869 \
left.cub right.cub left.json right.json \
run/run
See :numref:`nextsteps` for a discussion about various
speed-vs-quality choices when running stereo.
This is followed by creation of a DEM (:numref:`point2dem`)
and products that can be visualized (:numref:`genhillshade`)::
point2dem run/run-PC.tif --orthoimage run/run-L.tif
hillshade run/run-DEM.tif
colormap run/run-DEM.tif -s run/run-DEM_HILLSHADE.tif
.. figure:: ../images/CSM_Frame.png
:name: CSM_Frame_example
The produced colorized DEM and orthoimage for the CSM Frame camera
example. Likely using mapprojection (:numref:`mapproj-example`)
may have reduced the number and size of the holes in the DEM.
.. _csm_linescan:
The USGS CSM linescan sensor
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In this example we will use the Mars CTX linescan sensor. The images are regular
``.cub`` files as in the tutorial in :numref:`moc_tutorial`, hence the only
distinction compared to that example is that the cameras are stored as ``.json``
files.
We will work with the dataset pair::
J03_045994_1986_XN_18N282W.cub J03_046060_1986_XN_18N282W.cub
which, for simplicity, we will rename to ``left.cub`` and ``right.cub``
and the same for the associated camera files.
See :numref:`kaguya_tc` for another linescan example for the Kaguya linescan
sensor for the Moon.
.. _create_csm_linescan:
Creation of CSM linescan cameras
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Note that this recipe looks a little different for Frame and SAR cameras,
as can be seen in :numref:`create_csm_dawn` and :numref:`create_csm_sar`.
Run the ISIS ``spiceinit`` command on the .cub files as::
spiceinit from = left.cub
spiceinit from = right.cub
Next, CSM cameras are created, with ``isd_generate``. This program is discussed
in :numref:`create_csm_dawn`.
*Note:* Currently shipped version of ``isd_generate`` (in ALE 1.0.2) has a bug,
creating very large linescan camera models that are very slow to load. If this
is noticed, consider applying the upstream ALE fix in the ``type_sensor.py``
file in the ALE package.
Run::
isd_generate left.cub
isd_generate right.cub
This will produce ``left.json`` and ``right.json``.
See the ``isd_generate`` manual for more details.
Running stereo
^^^^^^^^^^^^^^
::
parallel_stereo --stereo-algorithm asp_mgm \
--subpixel-mode 9 \
left.cub right.cub left.json right.json \
run/run
Check the stereo convergence angle as printed during preprocessing
(:numref:`stereo_pairs`). If that angle is small, the results are not
going to be great.
See :numref:`nextsteps` for a discussion about various stereo
algorithms and speed-vs-quality choices.
The fancier MGM algorithm could be used by running this example with
``--stereo-algorithm asp_mgm``.
The actual stereo session used is ``csm``, and here it will be
auto-detected based on the extension of the camera files.
Next, a DEM is produced (:numref:`point2dem`)::
point2dem -r mars --stereographic \
--proj-lon 77.4 --proj-lat 18.4 \
run/run-PC.tif
For ``point2dem`` we chose to use a stereographic projection centered at
some point in the area of interest. See :numref:`point2dem_proj`
for how a projection for the DEM can be auto-determined.
One can also run ``parallel_stereo`` with mapprojected images
(:numref:`mapproj-example`). The first step would be to create a
low-resolution smooth DEM from the previous cloud::
point2dem -r mars \
--stereographic \
--proj-lon 77.4 --proj-lat 18.4 \
--tr 120 \
run/run-PC.tif \
-o run/run-smooth
followed by mapprojecting onto it and redoing stereo::
mapproject --tr 6 run/run-smooth-DEM.tif left.cub \
left.json left.map.tif
mapproject --tr 6 run/run-smooth-DEM.tif right.cub \
right.json right.map.tif
parallel_stereo --stereo-algorithm asp_mgm \
--subpixel-mode 9 \
left.map.tif right.map.tif left.json right.json \
run_map/run run/run-smooth-DEM.tif
Notice how we used the same resolution for both images when
mapprojecting. That helps making the resulting images more similar and
reduces the processing time (:numref:`mapproj-res`).
.. _csm_wac:
CSM Pushframe sensor
~~~~~~~~~~~~~~~~~~~~
The USGS CSM *Pushframe* sensor models a pushframe camera. The support for this sensor
is not fully mature, and some artifacts can be seen in the DEMs (per below).
What follows is an illustration of using this sensor with Lunar Reconnaissance
Orbiter (LRO) WAC images.
This example, including the inputs, recipe, and produced terrain model,
can be downloaded.
Fetching the data
^^^^^^^^^^^^^^^^^
We will focus on the monochromatic images for this sensor. Visit:
https://ode.rsl.wustl.edu/moon/indexproductsearch.aspx
Find the *Lunar Reconnaissance Orbiter -> Experiment Data Record Wide
Angle Camera - Mono (EDRWAM)* option.
Search either based on a longitude-latitude window, or near a notable
feature, such as a named crater. We choose a couple of images having
the Tycho crater, with download links::
http://pds.lroc.asu.edu/data/LRO-L-LROC-2-EDR-V1.0/LROLRC_0002/DATA/MAP/2010035/WAC/M119923055ME.IMG
http://pds.lroc.asu.edu/data/LRO-L-LROC-2-EDR-V1.0/LROLRC_0002/DATA/MAP/2010035/WAC/M119929852ME.IMG
Fetch these with ``wget``.
Creation of .cub files
^^^^^^^^^^^^^^^^^^^^^^
We broadly follow the tutorial at :cite:`ohman2015procedure`. For a
dataset called ``image.IMG``, do::
lrowac2isis from = image.IMG to = image.cub
This will create so-called *even* and *odd* datasets, with names like
``image.vis.even.cub`` and ``image.vis.odd.cub``.
Run ``spiceinit`` on them to set up the SPICE kernels::
spiceinit from = image.vis.even.cub
spiceinit from = image.vis.odd.cub
followed by ``lrowaccal`` to adjust the image intensity::
lrowaccal from = image.vis.even.cub to = image.vis.even.cal.cub
lrowaccal from = image.vis.odd.cub to = image.vis.odd.cal.cub
All these .cub files can be visualized with ``stereo_gui``. It can be
seen that instead of a single contiguous image we have a set of narrow
horizontal framelets, with some of these in the even and some in the odd
cub file. The framelets may also be recorded in reverse.
Production of seamless mapprojected images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This is not needed for stereo, but may be useful for readers who would
like to produce image mosaics using ``cam2map``.
::
cam2map from = image.vis.even.cal.cub to = image.vis.even.cal.map.cub
cam2map from = image.vis.odd.cal.cub to = image.vis.odd.cal.map.cub \
map = image.vis.even.cal.map.cub matchmap = true
Note how in the second ``cam2map`` call we used the ``map`` and
``matchmap`` arguments. This is to ensure that both of these output
images have the same resolution and projection. In particular, if more
datasets are present, it is suggested for all of them to use the same
previously created .cub file as a map reference. That is because stereo
works a lot better on mapprojected images with the same ground
resolution. For more details see :numref:`mapproj-example` and
:numref:`mapproj_with_cam2map`.
To verify that the obtained images have the same ground resolution, do::
gdalinfo image.vis.even.cal.map.cub | grep -i "pixel size"
gdalinfo image.vis.odd.cal.map.cub | grep -i "pixel size"
(see :numref:`gdal_tools` regarding this tool).
The fusion happens as::
ls image.vis.even.cal.map.cub image.vis.odd.cal.map.cub > image.txt
noseam fromlist = image.txt to = image.noseam.cub SAMPLES=73 LINES=73
The obtained file ``image.noseam.cub`` may still have some small artifacts
but should be overall reasonably good.
Stitching the raw even and odd images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This requires ISIS newer than version 6.0, or the latest development code.
For each image in the stereo pair, stitch the even and odd datasets::
framestitch even = image.vis.even.cal.cub odd = image.vis.odd.cal.cub \
to = image.cub flip = true num_lines_overlap = 2
The ``flip`` flag is needed if the order of framelets is reversed
relative to what the image is expected to show.
The parameter ``num_lines_overlap`` is used to remove a total of this
many lines from each framelet (half at the top and half at the bottom)
before stitching, in order to deal with the fact that the even and odd
framelets have a little overlap, and that they also tend to have artifacts
due to some pixels flagged as invalid in each first and last framelet
row.
The CSM camera models will assume that this parameter is set at 2, so
it should not be modified. Note however that WAC framelets may overlap
by a little more than that, so resulting DEMs may have some artifacts
at framelet borders, as can be seen further down.
Creation of CSM WAC cameras
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Set::
export ALESPICEROOT=$ISISDATA
CSM cameras are created, with ``isd_generate``. This program is discussed
in :numref:`create_csm_dawn`. Run::
isd_generate -k image.vis.even.cal.cub image.vis.even.cal.cub
isd_generate -k image.vis.odd.cal.cub image.vis.odd.cal.cub
These will create ``image.vis.even.cal.json`` and ``image.vis.odd.cal.json``.
Do not use the stitched .cub file as that one lacks camera information. The
obtained .json files can be renamed to follow the same convention as the
stitched .cub images.
Running stereo
^^^^^^^^^^^^^^
::
parallel_stereo --stereo-algorithm asp_mgm \
--left-image-crop-win 341 179 727 781 \
--right-image-crop-win 320 383 824 850 \
M119923055ME.cub M119929852ME.cub \
M119923055ME.json M119929852ME.json \
run/run
As printed by ``stereo_pprc``, the convergence angle is about 27
degrees, which is a good number.
See :numref:`nextsteps` for a discussion about various stereo
speed-vs-quality choices.
A DEM is produced with ``point2dem`` (:numref:`point2dem`), and other products
are made for visualization (:numref:`manipulating_results`)::
point2dem --stereographic --auto-proj-center \
run/run-PC.tif --orthoimage run/run-L.tif
hillshade run/run-DEM.tif
colormap run/run-DEM.tif -s run/run-DEM_HILLSHADE.tif
.. figure:: ../images/CSM_WAC.png
:name: CSM_WAC_example
The produced colorized DEM and orthoimage for the CSM WAC camera
example. The artifacts are due to issues with the stitching of even and odd
framelets.
It can be seen that the stereo DEM has some linear artifacts. That is
due to the fact that the stitching does not perfectly integrate the
framelets.
An improved solution can be obtained by creating a low-resolution
version of the above DEM, mapprojecting the images on it, and then
re-running stereo, per (:numref:`mapproj-example`).
::
point2dem --stereographic --auto-proj-center \
--tr 800 run/run-PC.tif --search-radius-factor 5 \
-o run/run-low-res
mapproject --tr 80 run/run-low-res-DEM.tif \
M119923055ME.cub M119923055ME.json M119923055ME.map.tif
mapproject --tr 80 run/run-low-res-DEM.tif \
M119929852ME.cub M119929852ME.json M119929852ME.map.tif
parallel_stereo --stereo-algorithm asp_mgm \
M119923055ME.map.tif M119929852ME.map.tif \
M119923055ME.json M119929852ME.json \
run_map/run run/run-low-res-DEM.tif
point2dem --stereographic --auto-proj-center \
run_map/run-PC.tif --orthoimage run_map/run-L.tif
hillshade run_map/run-DEM.tif
colormap run_map/run-DEM.tif -s run_map/run-DEM_HILLSHADE.tif
To create the low-resolution DEM we used a grid size of 800 m,
which is coarser by a factor of about 8 compared to the nominal WAC
resolution of 100 m / pixel.
Note that the same resolution is used when mapprojecting both images; that is
very important to avoid a large search range in stereo later. This is discussed
in more detail in :numref:`mapproj-example`.
.. figure:: ../images/CSM_WAC_mapproj.png
:name: CSM_WAC_example_mapproj
The produced colorized DEM and orthoimage for the CSM WAC camera
example, when mapprojected images are used.
As can be seen in the second figure, there are somewhat fewer artifacts.
The missing lines in the DEM could be filled in if ``point2dem`` was run
with ``--search-radius-factor 4``, for example.
Given that there exists a wealth of WAC images, one could also try to
get several more stereo pairs with similar illumination, run bundle
adjustment for all of them (:numref:`bundle_adjust`), run pairwise
stereo, create DEMs (at the same resolution), and then merge them with
``dem_mosaic`` (:numref:`dem_mosaic`). This may further attenuate the
artifacts as each stereo pair will have them at different
locations. See :numref:`stereo_pairs` for guidelines about how to
choose good stereo pairs.
.. _csm_minirf:
The USGS CSM SAR sensor for LRO Mini-RF
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This page describes processing data produced with the *Mini-RF* Synthetic
Aperture Radar (SAR) sensor on the LRO spacecraft while making use of CSM
cameras. A SAR example for Earth is in :numref:`umbra_sar`.
It is challenging to process its data with ASP for several
reasons:
- The synthetic image formation model produces curved rays going from the
ground to the pixel in the camera (:cite:`kirk2016semi`). To simplify the
calculations, ASP finds where a ray emanating from the camera
intersects the standard Moon ellipsoid with radius 1737.4 km and
declares the ray to be a straight line from the camera center to this
point.
- This sensor very rarely acquires stereo pairs. The convergence angle
(:numref:`stereo_pairs`) as printed by ``parallel_stereo`` in
pre-processing is usually less than 5 degrees, which is little and
results in noisy DEMs. In this example we will use a dataset
intentionally created with stereo in mind. The images will cover a
part of Jackson crater (:cite:`kirk2011radargrammetric`).
- It is not clear if all modeling issues with this sensor were
resolved. The above publication states that "Comparison of the stereo
DTM with ~250 m/post LOLA grid data revealed (in addition to
dramatically greater detail) a very smooth discrepancy that varied
almost quadratically with latitude and had a peak-to-peak amplitude
of nearly 4000 m."
- The images are dark and have unusual appearance, which requires
some pre-processing and a large amount of interest points.
Hence, ASP's support for this sensor is experimental. The results
are plausible but likely not fully rigorous.
This example, including input images, produced outputs, and a recipe, is available
for download at:
https://github.com/NeoGeographyToolkit/StereoPipelineSolvedExamples
No ISIS data are needed to run it.
Creating the input images
^^^^^^^^^^^^^^^^^^^^^^^^^
Fetch the data from PDS::
wget https://pds-geosciences.wustl.edu/lro/lro-l-mrflro-4-cdr-v1/lromrf_0002/data/sar/03800_03899/level1/lsz_03821_1cd_xku_16n196_v1.img
wget https://pds-geosciences.wustl.edu/lro/lro-l-mrflro-4-cdr-v1/lromrf_0002/data/sar/03800_03899/level1/lsz_03821_1cd_xku_16n196_v1.lbl
wget https://pds-geosciences.wustl.edu/lro/lro-l-mrflro-4-cdr-v1/lromrf_0002/data/sar/03800_03899/level1/lsz_03822_1cd_xku_23n196_v1.img
wget https://pds-geosciences.wustl.edu/lro/lro-l-mrflro-4-cdr-v1/lromrf_0002/data/sar/03800_03899/level1/lsz_03822_1cd_xku_23n196_v1.lbl
These will be renamed to ``left.img``, ``right.img``, etc., to simplify
the processing.
Set, per :numref:`planetary_images`, values for ``ISISROOT`` and ``ISISDATA``. Run::
mrf2isis from = left.lbl to = left.cub
mrf2isis from = right.lbl to = right.cub
Run ``spiceinit``. Setting the shape to the ellipsoid makes it easier
to do image-to-ground computations::
spiceinit from = left.cub shape = ellipsoid
spiceinit from = right.cub shape = ellipsoid
.. _create_csm_sar:
Creation of CSM SAR cameras
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Set::
export ALESPICEROOT=$ISISDATA
CSM cameras are created, with ``isd_generate``. This program is discussed
in :numref:`create_csm_dawn`. Run::
isd_generate -k left.cub left.cub
isd_generate -k right.cub right.cub
This will create the CSM camera files ``left.json`` and ``right.json``.
Run ``cam_test`` (:numref:`cam_test`) as a sanity check::
cam_test --image left.cub --cam1 left.cub --cam2 left.json
cam_test --image right.cub --cam1 right.cub --cam2 right.json
Preparing the images
^^^^^^^^^^^^^^^^^^^^
ASP accepts only single-band images, while these .cub files have four of them.
We will pull the first band and clamp it to make it easier for stereo to find
interest point matches::
gdal_translate -b 1 left.cub left_b1.tif
gdal_translate -b 1 right.cub right_b1.tif
image_calc -c "min(var_0, 0.5)" left_b1.tif -d float32 \
-o left_b1_clamp.tif
image_calc -c "min(var_0, 0.5)" right_b1.tif -d float32 \
-o right_b1_clamp.tif
Running stereo
^^^^^^^^^^^^^^
It is simpler to first run a clip with ``stereo_gui``
(:numref:`stereo_gui`). This will result in the following command::
parallel_stereo --ip-per-tile 3500 \
--left-image-crop-win 0 3531 3716 10699 \
--right-image-crop-win -513 22764 3350 10783 \
--stereo-algorithm asp_mgm --min-num-ip 10 \
left_b1_clamp.tif right_b1_clamp.tif \
left.json right.json run/run
The stereo convergence angle for this pair is 18.4 degrees which is
rather decent.
Create a colorized DEM and orthoimage (:numref:`point2dem`)::
point2dem run/run-PC.tif --orthoimage run/run-L.tif
hillshade run/run-DEM.tif
colormap run/run-DEM.tif -s run/run-DEM_HILLSHADE.tif
See :numref:`nextsteps` for a discussion about various
speed-vs-quality choices when running stereo.
.. figure:: ../images/CSM_MiniRF.png
:name: CSM_miniRF_example
The produced colorized DEM and orthoimage for the CSM SAR example.
.. _csm_msl:
CSM cameras for MSL
~~~~~~~~~~~~~~~~~~~
This example shows how, given a set of Mars Science Laboratory (MSL) Curiosity
rover ``Nav`` or ``Mast`` camera images, CSM camera models can be created. Stereo
pairs are then used (with either ``Nav`` or ``Mast`` data) to make DEMs and
orthoimages.
After recent fixes in ALE (details below), the camera models are accurate enough
that stereo pairs acquired at different rover locations and across different days
result in consistent DEMs and orthoimages.
See :numref:`rig_msl` for a Structure-from-Motion solution without using CSM
cameras. That one results in self-consistent meshes that, unlike the DEMs
produced here, are not geolocated.
Illustration
^^^^^^^^^^^^
.. figure:: ../images/MSL_Kimberly_images.png
:name: csm_msl_figure1
:alt: MSL Kimberly images
Four out of the 10 images (5 stereo pairs) used in this example.
.. figure:: ../images/MSL_Kimberly_DEM_DRG.png
:name: csm_msl_figure2
:alt: MSL Kimberly DEM and ortho
Produced DEM and orthoimage. See :numref:`csm_msl_multiday` for a larger
example.
Fetch the images and metadata from PDS
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
See :numref:`msl_image_prep`. Here we will work with .cub files rather than
converting them to .png. The same Mars day will be used as there (SOL 597). The
datasets for SOL 603 were verified to work as well.
The dataset used in this example (having .LBL, .cub, and .json files) is
available `for download
`_.
It is suggested to recreate the .json files in that dataset as done below.
Download the SPICE data
^^^^^^^^^^^^^^^^^^^^^^^
Fetch the SPICE kernels for MSL (see :numref:`planetary_images` and the links
from there).
.. _csm_msl_create:
Creation of CSM MSL cameras
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Set::
export ALESPICEROOT=$ISISDATA
A full-resolution MSL left ``Nav`` image uses the naming convention::
NLB__F.cub
with the right image starting instead with ``NRB``. The metadata files
downloaded from PDS end with ``.LBL``.
A bug in the shipped metakernels requires editing the file::
$ISISDATA/msl/kernels/mk/msl_v01.tm
and replacing::
/usgs/cpkgs/isis3/data
with your value of $ISISDATA. Ensure that the resulting path still ends with
``/msl/kernels``.
CSM cameras are created, with ``isd_generate``. This program is discussed
in :numref:`create_csm_dawn`. Run::
isd_generate image.LBL
This will produce the file ``image.json``.
If running into issues, invoke this command with the ``-v`` option to see where
it fails.
Simple stereo example
^^^^^^^^^^^^^^^^^^^^^
In this example the camera orientations are not refined using bundle adjustment,
as the camera poses are reasonably good. If desired to do that, one could run
``bundle_adjust`` (:numref:`bundle_adjust`) as::
bundle_adjust --no-datum --camera-weight 0 --tri-weight 0.1 \
data/*.cub data/*.json -o ba/run
Here and below we use the option ``--no-datum`` as these are ground-level cameras,
whose rays may not reliably intersect the planet datum.
For each stereo pair, run ``parallel_stereo`` (:numref:`parallel_stereo`) as::
parallel_stereo \
--stereo-algorithm asp_mgm \
--subpixel-mode 3 --no-datum \
--min-triangulation-angle 1.5 \
left.cub right.cub \
left.json right.json \
run/run
If bundle adjustment was used, the above command should be run with the option
``--bundle-adjust-prefix ba/run``.
The option ``--min-triangulation-angle 1.5`` is essential. It filters out
far-away and noisy points. Increasing this will remove more points. For
terrains with a lot of shadows (such as for the Moon), also consider using the
option ``--no-data-value`` to filter out pixels with low intensity
(:numref:`stereodefault`).
This is followed by DEM and orthoimage creation (:numref:`point2dem`) with::
point2dem --stereographic \
--proj-lon 137.402 --proj-lat -4.638 \
--search-radius-factor 5 --orthoimage \
run/run-PC.tif run/run-L.tif
Here, the option ``--search-radius-factor 5`` is used to fill the point cloud
when moving further from the rover. A local stereographic projection was used.
The produced DEMs can be mosaicked together with ``dem_mosaic``
(:numref:`dem_mosaic`) as::
dem_mosaic */*DEM.tif -o dem_mosaic.tif
For the orthoimages, one can use::
dem_mosaic --first */*DRG.tif -o ortho_mosaic.tif
The option ``--first`` picks the first encountered image pixel at each location,
rather than blending them together which may blur the output mosaic.
See an illustration in :numref:`csm_msl_figure2`, with the input images in :numref:`csm_msl_figure1`.
.. _csm_msl_multiday:
Multi-day stereo
^^^^^^^^^^^^^^^^
.. figure:: ../images/msl_multiday.png
:name: msl_multiday
:alt: MSL multiday stereo
A combined DEM and orthoimage produced from 15 stereo pairs from SOL 597 and
13 stereo pairs from SOL 603. The misregistration half-way down is not due to
mismatch across days but because of insufficient overlap between two image
subsets on SOL 603. Here, blue and red correspond to elevations of -5038.921
and -5034.866 meters.
In this example we take advantage of the fact that there is decent overlap
between images acquired on SOL 597 and SOL 603. They both image the same hill,
called *Kimberly*, in Gale crater, from somewhat different perspectives. Hence
we combine these datasets to increase the coverage.
Good overlap between different days, or even between consecutive rover
stops in the same day, is not guaranteed. Sometimes the low-resolution nav cam
images (:numref:`low_res_msl`) can help with increasing the overlap and
coverage. Lack of good overlap can result in registration errors, as can be seen
in :numref:`msl_multiday`.
For a larger and better-behaved dataset it is suggested to consider the images
from SOL 3551 to 3560. Some effort may be needed to select a good subset.
A workflow can be as follows. First, individual DEMs were created and mosaicked,
as in :numref:`csm_msl`. The quality of the produced DEM can be quite uneven,
especially far from the camera.
Large holes in the initial DEM were filled in with the ``dem_mosaic`` option
``--fill-search-radius`` (:numref:`dem_mosaic_extrapolate`).
Then, it can be made coarser, for example, as::
gdalwarp -r cubic -tr 0.1 0.1 input.tif output.tif
(This assumes the projection is local stereographic.)
This DEM was then blurred a few times with ``dem_mosaic`` option
``--dem-blur-sigma 10``. This should be repeated until the DEM is smooth enough
and shows no artifacts. The resulting DEM is called ``dem.tif``.
All images were mapprojected onto this DEM using the same local stereographic
projection, and a resolution of 0.01 m::
proj="+proj=stere +lat_0=-4.638 +lon_0=137.402 +k=1 +x_0=0 +y_0=0 +R=3396190 +units=m +no_defs"
mapproject --tr 0.01 --t_srs "$proj" \
dem.tif image.cub image.json image.map.tif
Bundle adjustment was run on the desired set of input images and cameras, while
making use of the mapprojected images to find matches::
dem=dem.tif
parallel_bundle_adjust \
--image-list images.txt \
--camera-list cameras.txt \
--mapprojected-data-list map_images.txt \
--camera-weight 0 \
--heights-from-dem $dem \
--heights-from-dem-uncertainty 10.0 \
--heights-from-dem-robust-threshold 0.1 \
--auto-overlap-params "$dem 15" \
-o ba/run
In retrospect, this mapprojection step may be not necessary, and one could
run bundle adjustment with original images.
Then ``parallel_stereo`` was run with mapprojected images, with the option
``--bundle-adjust-prefix ba/run``, to use the bundle-adjusted cameras::
parallel_stereo \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
--max-disp-spread 80 \
--min-triangulation-angle 1.5 \
--bundle-adjust-prefix ba/run \
left.map.tif right.map.tif \
left.json right.json run_map/run \
$dem
point2dem --tr 0.01 --stereographic \
--proj-lon 137.402 --proj-lat -4.638 \
--errorimage \
run_map/run-PC.tif \
--orthoimage run_map/run-L.tif
Each run must use a separate output prefix, instead of ``run_map/run``.
Here, the option ``--min-triangulation-angle 1.5`` was essential.
It filters out far-away and noisy points.
Even with this option, the accuracy of a DEM goes down far from the cameras.
Artifacts can arise where the same region is seen from two different locations,
and it is far from either. In this particular example some problematic portions
were cut out with ``gdal_rasterize`` (:numref:`gdal_rasterize_example`).
The produced DEMs were inspected, and the best ones were mosaicked together with
``dem_mosaic``, as follows::
dem_mosaic --weights-exponent 0.5 */*DEM.tif -o dem_mosaic.tif
The option ``--weights-exponent 0.5`` reduced the artifacts in blending.
The orthoimages were mosaicked with::
dem_mosaic --first */*DRG.tif -o ortho_mosaic.tif
It is suggested to sort the input images for this call from best to worst in
terms of quality. In particular, the images where the rover looks down rather
than towards the horizon should be earlier in the list.
See the produced DEM and orthoimage in :numref:`msl_multiday`.
Mapprojection
^^^^^^^^^^^^^
The input .cub image files and the camera .json files can be used to create
mapprojected images with the ``mapproject`` program (:numref:`mapproject`).
The DEM for mapprojection can be the one created earlier with ``point2dem``.
If a third-party DEM is used, one has to make sure its elevations are consistent
with the DEMs produced earlier.
Use the option ``--t_projwin`` to prevent the produced images from extending for
a very long distance towards the horizon.
MSL Mast cameras
^^^^^^^^^^^^^^^^
The same procedure works for creating MSL Mast cameras. To run stereo, first use
``gdal_translate -b 1`` to pull the first band from the input images. This
workflow was tested with the stereo pair ``0706ML0029980010304577C00_DRCL`` and
``0706MR0029980000402464C00_DRCL`` for SOL 706.
.. _low_res_msl:
Low-resolution MSL Nav cam images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In addition to full-resolution Nav camera images (1024 pixels), MSL also
acquires low-resolution Nav images (256 pixels) at separate times. These have
the string ``_D`` as part of their name, instead of ``_F``. Such images were
validated to work, and can produce good DEMs that can plug some gaps in
coverage.
.. _csm_state:
CSM model state
~~~~~~~~~~~~~~~
CSM cameras are stored in JSON files, in one of the following two formats:
* *ISD*: This has the transforms from sensor coordinates to J2000, and from
J2000 to ECEF.
* *Model state*: In this file the above-mentioned transforms are combined, and
other information is condensed or removed.
The model state files have all the data needed to project ground points into the
camera and vice-versa, so they are sufficient for any use in ASP. The model state can
also be embedded in ISIS cubes (:numref:`embedded_csm`).
The `usgscsm_cam_test
`_
program (shipped with ASP) can read any of these and write back a model state.
ASP's bundle adjustment program (:numref:`bundle_adjust`) normally writes plain
text ``.adjust`` files that encode how the position and orientation of the
cameras were modified (:numref:`adjust_files`). If invoked for CSM cameras,
additional files with extension ``.adjusted_state.json`` are saved in the same
output directory, which contain the model state from the input CSM cameras with
the optimization adjustments applied to them. Use zero iterations in
``bundle_adjust`` to save the states of the original cameras.
This functionality is implemented for all USGS CSM sensors, so, for ``frame``,
``linescan``, ``pushframe``, and ``SAR`` models.
The ``cam_gen`` program can convert several linescan camera model types to CSM
model state (:numref:`cam_gen_linescan`). It can also approximate some Pinhole,
RPC, or other cameras with CSM frame cameras in model state format
(:numref:`cam_gen_frame`).
ASP's ``parallel_stereo`` and bundle adjustment programs can, in addition to CSM
ISD camera model files, also load such model state files, either as previously
written by ASP or from an external source (it will auto-detect the type from the
format of the JSON files). Hence, the model state is a convenient format for
data exchange, while being less complex than the ISD format.
If ``parallel_stereo`` is used to create a point cloud from
images and CSM cameras, and then that point cloud has a transform
applied to it, such as with ``pc_align``, the same transform can be
applied to the model states for the cameras using ``bundle_adjust``
(:numref:`ba_pc_align`).
To evaluate how well the obtained CSM camera approximates the ISIS
camera model, run the program ``cam_test`` shipped with ASP
(:numref:`cam_test`) as follows::
cam_test --sample-rate 100 --image camera.cub \
--cam1 camera.cub --cam2 camera.json
The pixel errors are expected to be at most on the order of 0.2
pixels.
.. _embedded_csm:
CSM state embedded in ISIS cubes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ASP usually expects CSM cameras to be specified in JSON files. It also accepts
CSM camera model state data (:numref:`csm_state`) embedded in ISIS cubes, if all
of the following (reasonable) assumptions are satisfied:
* JSON files are not passed in.
* The ISIS cubes contain CSM model state data (in the ``CSMState`` group).
* The ``--session-type`` (or ``--t``) option value is not set to ``isis`` (or
``isismapisis``). So, its value should be either ``csm`` (or ``csmmapcsm``),
or left blank.
Hence, if no CSM data is provided, either in the ISIS cubes or separately
in JSON files, or ``--session-type`` is set to ``isis`` (or ``isismapisis``),
ASP will use the ISIS camera models.
The above applies to all ASP tools that read CSM cameras (``parallel_stereo``,
``bundle_adjust``, ``jitter_solve``, ``mapproject``, ``cam_test``).
If ``bundle_adjust`` (:numref:`bundle_adjust`) or ``jitter_solve``
(:numref:`jitter_solve`) is run with CSM cameras, either embedded in ISIS cubes
or specified separately, and the flag ``--update-isis-cubes-with-csm-state`` is
set, then the optimized model states will be saved back to the ISIS cubes, while
the SPICE and other obsolete information from the cubes will be deleted.
(Note that `spiceinit
`_
can restore the cubes.)
Separate model state files in the JSON format will be saved by ``bundle_adjust``
as well, as done without this option.
Note that if images are mapprojected with certain camera files, and then those
camera files are updated in-place, this will result in wrong results if stereo
is run with the old mapprojected images and updated cameras.
The `csminit
`_
program can also embed a .json model state file into a .cub file (in ISIS
9.0.0 and later). Example::
csminit from = img.cub state = csm.json
================================================
FILE: docs/examples/ctx.rst
================================================
.. _ctx_example:
Mars Reconnaissance Orbiter CTX
-------------------------------
Overview
~~~~~~~~
CTX is a moderately difficult camera to work with. The processing time
can be pretty long when using the Bayes EM subpixel refinement
(``subpixel-mode 2``). Otherwise the disparity between images is
relatively small, allowing efficient computation and a reasonable
processing time.
In this example we use mapprojected images, which is the most reliable
way to align the images for correlation. Mapprojection is discussed
in :numref:`mapproj_with_cam2map` and :numref:`mapproj-example`. Note
that mapprojection can slow down the triangulation step, but given
that ``parallel_stereo`` performs the triangulation using multiple
processes, that is not a concern.
This example's recipe is in the ``examples/CTX`` directory shipped
with ASP (type 'make' there to run it).
.. figure:: ../images/examples/ctx/n_terra_meridiani_ctx_combined.png
:name: ctx_example_fig
Example output possible with the CTX imager aboard MRO.
The images are for the North Terra Meridiani region.
Download
~~~~~~~~
Download the CTX images P02_001981_1823_XI_02N356W.IMG and
P03_002258_1817_XI_01N356W.IMG from PDS, at:
https://ode.rsl.wustl.edu/mars/indexproductsearch.aspx
The download commands are::
wget https://planetarydata.jpl.nasa.gov/img/data/mro/ctx/mrox_0031/data/P02_001981_1823_XI_02N356W.IMG
wget https://planetarydata.jpl.nasa.gov/img/data/mro/ctx/mrox_0042/data/P03_002258_1817_XI_01N356W.IMG
Creation of cub files
~~~~~~~~~~~~~~~~~~~~~
Install ISIS (:numref:`planetary_images`).
Convert the .IMG files to ISIS .cub files, initialize the metadata (SPICE), and
calibrate::
ISIS> mroctx2isis from = P02_001981_1823_XI_02N356W.IMG \
to = P02_001981_1823.cub
ISIS> mroctx2isis from = P03_002258_1817_XI_01N356W.IMG \
to = P03_002258_1817.cub
ISIS> spiceinit from = P02_001981_1823.cub web = true
ISIS> spiceinit from = P03_002258_1817.cub web = true
ISIS> ctxcal from = P02_001981_1823.cub to = P02_001981_1823.cal.cub
ISIS> ctxcal from = P03_002258_1817.cub to = P03_002258_1817.cal.cub
Optionally run ``ctxevenodd`` on the ``cal.cub`` files.
The `spiceinit
`_
command initializes the cub file metadata. The option ``web = true`` fetches the
needed data on-the-fly. If it does not work, it is necessary to download this
data manually, from the ``mro`` directory of the `ISIS data area
`_.
Running stereo
~~~~~~~~~~~~~~
Run ``parallel_stereo`` (:numref:`parallel_stereo`) and ``point2dem``
(:numref:`point2dem`)::
cam2map4stereo.py P02_001981_1823.cal.cub P03_002258_1817.cal.cub
parallel_stereo \
--stereo-algorithm asp_mgm --subpixel-mode 9 \
P02_001981_1823.map.cub P03_002258_1817.map.cub \
results/out
point2dem -r mars --stereographic --auto-proj-center \
results/out-PC.tif
Higher quality results can be obtained by using the ``asp_mgm`` algorithm and
mapprojection (:numref:`nextsteps`).
It is recommended to bundle-adjust the CTX cameras before stereo
(:numref:`bundle_adjustment`). Then the ``cam2map4stereo.py`` script
cannot be used, but mapprojection can be done with ``mapproject``
(:numref:`mapproj-example`).
Further processing
~~~~~~~~~~~~~~~~~~
It is strongly suggested to use CSM camera models for improved performance
(:numref:`csm`). See :numref:`create_csm_linescan` for how to create CSM camera
models for linescan cameras, including for CTX.
CTX cameras can exhibit jitter. How to correct it is discussed in
:numref:`jitter_ctx`.
Shape-from-Shading with CTX images is illustrated in :numref:`sfs_ctx`.
Automated Processing of HiRISE and CTX
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
While he was at the University of Chicago, David Mayer developed a set of
scripts for automating Stereo Pipeline for CTX and HiRISE images. Those
scripts and more information can now be found at
https://github.com/USGS-Astrogeology/asp_scripts.
================================================
FILE: docs/examples/dawn.rst
================================================
.. _dawn_isis:
Dawn (FC) Framing Camera
------------------------
This is a NASA mission to visit two of the largest objects in the
asteroid belt, Vesta and Ceres. The framing camera on board Dawn is
quite small and packs only a resolution of 1024x1024 pixels. This means
processing time is extremely short. To its benefit, it seems that the
mission planners leave the framing camera on taking shots quite rapidly.
On a single pass, they seem to usually take a chain of FC images that
have a high overlap percentage. This opens the idea of using ASP to
process not only the sequential pairs, but also the wider baseline
shots. Then someone could potentially average all the DEMs together to
create a more robust data product.
For this example, we used the images FC21A0010191_11286212239F1T and
FC21A0010192_11286212639F1T which show the Cornelia crater on
Vesta. We learned about them from the anaglyph shown on the Planetary
Science Blog :cite:`planetaryblog:vesta`.
A different example (using CSM cameras) is in :numref:`csm_frame`.
.. figure:: ../images/examples/dawn/Vesta_figure.png
:name: dawn-nomap-example
Example colorized height map and ortho image output.
Commands
~~~~~~~~
First you must download and unzip the Dawn FC images from PDS from
https://sbib.psi.edu/data/PDS-Vesta/pds-vesta.html::
wget https://sbib.psi.edu/data/PDS-Vesta/HAMO/img-1A/FC21A0010191_11286212239F1T.IMG.gz
wget https://sbib.psi.edu/data/PDS-Vesta/HAMO/img-1A/FC21A0010192_11286212639F1T.IMG.gz
gunzip FC21A0010191_11286212239F1T.IMG.gz
gunzip FC21A0010192_11286212639F1T.IMG.gz
Then, these are converted to ISIS .cub files and ``parallel_stereo`` is run::
dawnfc2isis from=FC21A0010191_11286212239F1T.IMG \
to=FC21A0010191_11286212239F1T.cub target=VESTA
dawnfc2isis from=FC21A0010192_11286212639F1T.IMG \
to=FC21A0010192_11286212639F1T.cub target=VESTA
spiceinit from=FC21A0010191_11286212239F1T.cub
spiceinit from=FC21A0010192_11286212639F1T.cub
parallel_stereo FC21A0010191_11286212239F1T.cub \
FC21A0010192_11286212639F1T.cub stereo/stereo
A DEM is then created with ``point2dem`` (:numref:`point2dem`)::
point2dem stereo-PC.tif --orthoimage stereo-L.tif \
--t_srs "+proj=eqc +lat_ts=-11.5 +a=280000 +b=229000 +units=m"
The option ``target=VESTA`` is necessary with ISIS version 5, and is
likely not needed in later versions.
See :numref:`nextsteps` for a discussion about various
speed-vs-quality choices when it comes to stereo algorithms.
================================================
FILE: docs/examples/dg.rst
================================================
.. _digital_globe_example:
DigitalGlobe
------------
Processing of DigitalGlobe/Maxar images is described extensively in the
tutorial in :numref:`dg_tutorial`.
See :numref:`rpc` for how to process DigitalGlobe/Maxar data if using
the RPC model instead of the exact linescan one. See :numref:`examples`
for other examples.
================================================
FILE: docs/examples/hirise.rst
================================================
.. _hirise_example:
Mars Reconnaissance Orbiter HiRISE
----------------------------------
HiRISE is one of the most challenging cameras to use when making 3D
models because HiRISE exposures can be several gigabytes each. Working
with this data requires patience as it will take time.
One important fact to know about HiRISE is that it is composed of
multiple linear CCDs that are arranged side by side with some vertical
offsets. These offsets mean that the CCDs will view some of the same
terrain but at a slightly different time and a slightly different angle.
Mosaicking the CCDs together to a single image is not a simple process
and involves living with some imperfections.
One cannot simply use the HiRISE RDR products, as they do not have the
required geometric stability. Instead, the HiRISE EDR products must be
assembled using ISIS ``noproj``. The USGS distributes a script in use
by the HiRISE team that works forward from the team-produced 'balance'
cubes, which provides a de-jittered, noproj'ed mosaic of a single
observation, which is perfectly suitable for use by the Stereo
Pipeline (this script was originally engineered to provide input for
SOCET SET). However, the 'balance' cubes are not available to the
general public, and so we include a program, ``hiedr2mosaic.py``
(:numref:`hiedr2mosaic`), written in Python, that will take PDS
available HiRISE EDR products and walk through the processing steps
required to provide good input images for ``parallel_stereo``.
The program takes all the red CCDs and projects them using the ISIS
``noproj`` command into the perspective of the RED5 CCD. From there,
``hijitreg`` is performed to work out the relative offsets between CCDs.
Finally the CCDs are mosaicked together using the average offset listed
from ``hijitreg`` using the ``handmos`` command, and the mosaic is
normalized with ``cubenorm``. Below is an outline of the processing.
::
hi2isis # Import HiRISE IMG to Isis
hical # Calibrate
histitch # Assemble whole-CCD images from the channels
spiceinit
spicefit # For good measure
noproj # Project all images into perspective of RED5
hijitreg # Work out alignment between CCDs
handmos # Mosaic to single file
cubenorm # Normalize the mosaic
To use our script, first download a set of HiRISE data. Here is an
example, using wget to fetch all RED CCDs for a dataset and process
them.
::
wget -r -l1 -np \
"http://hirise-pds.lpl.arizona.edu/PDS/EDR/ESP/ORB_029400_029499/ESP_029421_2300/" \
-A "*RED*IMG"
Alternately, you can pass the ``--download-folder`` option to
``hiedr2mosaic.py`` and pass in the URL of the web page containing the
EDR files as the only positional argument. This will cause the tool to
first download all of the RED CCD images to the specified folder and
then continue with processing.
::
hiedr2mosaic.py --download-folder hirise_example/ \
http://hirise-pds.lpl.arizona.edu/PDS/EDR/ESP/ORB_029400_029499/ESP_029421_2300/
Assuming you downloaded the files manually, go to the directory
containing the files. You can run the ``hiedr2mosaic.py`` program
without any arguments to view a short help statement, with the ``-h``
option to view a longer help statement, or just run the program on the
EDR files like so::
hiedr2mosaic.py *.IMG
If you have more than one observation's worth of EDRs in that directory,
then limit the program to just one observation's EDRs at a time, e.g.
``hiedr2mosaic.py PSP_001513_1655*IMG``. If you run into problems, try
using the ``-k`` option to retain all of the intermediary image files to
help track down the issue. The ``hiedr2mosaic.py`` program will create a
single mosaic file with the extension ``.mos_hijitreged.norm.cub``. Be
warned that the operations carried out by ``hiedr2mosaic.py`` can take
many hours to complete on the very large HiRISE images.
If you get any errors, make sure you have ISIS and its data installed, and the
environmental variable ``ISISDATA`` is set (:numref:`planetary_images`).
An example of using ASP with HiRISE data is included in the
``examples/HiRISE`` directory (just type 'make' there).
The dataset
~~~~~~~~~~~
HiRISE observations
`PSP_001513_1655 <https://hirise.lpl.arizona.edu/PSP_001513_1655>`_ and
`PSP_001777_1650 <https://hirise.lpl.arizona.edu/PSP_001777_1650>`_ are
on the floor of Gusev Crater and cover the area where the MER Spirit
landed and has roved, including the Columbia Hills.
.. figure:: ../images/examples/hirise/chills_hirise_combined.png
:name: hirise_chills_example
Example output using HiRISE images PSP_001513_1655 and
PSP_001777_1650 of the Columbia Hills.
Commands
~~~~~~~~
Download all 20 of the RED EDR ``.IMG`` files for each observation::
wget -r -l1 -np \
"http://hirise-pds.lpl.arizona.edu/PDS/EDR/PSP/ORB_001500_001599/PSP_001513_1655/" \
-A "*RED*IMG"
wget -r -l1 -np \
"http://hirise-pds.lpl.arizona.edu/PDS/EDR/PSP/ORB_001700_001799/PSP_001777_1650/" \
-A "*RED*IMG"
Then process::
ISIS> hiedr2mosaic.py PSP_001513_1655_RED*.IMG
ISIS> hiedr2mosaic.py PSP_001777_1650_RED*.IMG
ISIS> cam2map4stereo.py PSP_001777_1650_RED.mos_hijitreged.norm.cub \
PSP_001513_1655_RED.mos_hijitreged.norm.cub
ISIS> parallel_stereo PSP_001513_1655.map.cub \
PSP_001777_1650.map.cub result/output
See :numref:`nextsteps` for a discussion about various speed-vs-quality choices.
The ``corr-kernel`` value can usually be safely reduced to 21 pixels,
which resolves finer detail and speeds up processing for images with good
contrast.
================================================
FILE: docs/examples/historical.rst
================================================
.. _kh4:
Declassified satellite images: KH-4B
------------------------------------
ASP has preliminary support for the declassified high-resolution CORONA KH-4B images.
*This support is very experimental, and likely a lot of work is needed to make
it work reliably.*
For the latest suggested processing workflow, in the context of KH-9 images, see
:numref:`kh9`.
These images can be processed using either optical bar (panoramic) camera models
or as pinhole camera models with RPC distortion. Most of the steps are similar
to the example in :numref:`skysat-example`. The optical bar camera model is
based on :cite:`schenk2003rigorous` and :cite:`sohn2004mathematical`, whose
format is described in :numref:`panoramic`. For KH-9 images, the
improvements suggested in :cite:`ghuffar2022pipeline` are incorporated
(:numref:`ghuffar_method`).
Fetching the data
~~~~~~~~~~~~~~~~~
KH-4B images are available via the USGS Earth Explorer, at
https://earthexplorer.usgs.gov/
(an account is required to download the data). We will work with the
KH-4B image pair::
DS1105-2248DF076
DS1105-2248DA082
To get these from Earth Explorer, click on the ``Data Sets`` tab and
select the three types of declassified data available, then in the
``Additional Criteria`` tab choose ``Declass 1``, and in the
``Entity ID`` field in that tab paste the above frames (if no results
are returned, one can attempt switching above to ``Declass 2``, etc).
Clicking on the ``Results`` tab presents the user with information about
these frames.
Clicking on ``Show Metadata and Browse`` for every image will pop-up a
table with meta-information. That one can be pasted into a text file,
named for example, ``DS1105-2248DF076.txt`` for the first image, from
which later the longitude and latitude of each image corner will be
parsed. Then one can click on ``Download Options`` to download the data.
.. _resizing_images:
Resizing the images
~~~~~~~~~~~~~~~~~~~
Sometimes the input images can be so large that either the ASP tools
or the auxiliary ImageMagick ``convert`` program will fail, or the machine
will run out of memory.
It is suggested to resize the images to a more manageable size, at least for
initial processing. This is easiest to do by opening the images in
``stereo_gui`` (:numref:`stereo_gui`), which will create a pyramid of subsampled
("sub") images at 1/2 the full resolution, then 1/4th, etc. This resampling is
done using local averaging, to avoid aliasing effects.
Alternatively, one can call ``gdal_translate`` (:numref:`gdal_tools`), such as::
gdal_translate -outsize 25% 25% -r average input.tif output.tif
This will reduce the image size by a factor of 4. The ``-r average`` option will,
as before, help avoid aliasing.
A camera model (pinhole or optical bar) created at one resolution can be
converted to another resolution by adjusting the ``pitch`` parameter (a higher
value of pitch means bigger pixels so lower resolution). For optical bar cameras
the image dimensions and image center need to be adjusted as well, as those are
in units of pixels.
Stitching the images
~~~~~~~~~~~~~~~~~~~~
Each downloaded image will be made up of 2-4 portions, presumably due to
the limitations of the scanning equipment. They can be stitched together
using ASP's ``image_mosaic`` tool (:numref:`image_mosaic`).
For some reason, the KH-4B images are scanned in an unusual order. To
mosaic them, the last image must be placed first, the next to last
should be second, etc. In addition, as seen from the tables of metadata
discussed earlier, some images correspond to the ``Aft`` camera type.
Those should be rotated 180 degrees after mosaicking, hence below we use
the ``--rotate`` flag for that one. The overlap width is manually
determined by looking at two of the sub images in ``stereo_gui``.
With this in mind, image mosaicking for these two images will happen as
follows::
image_mosaic DS1105-2248DF076_d.tif DS1105-2248DF076_c.tif \
DS1105-2248DF076_b.tif DS1105-2248DF076_a.tif \
-o DS1105-2248DF076.tif \
--ot byte --overlap-width 7000 --blend-radius 2000
image_mosaic DS1105-2248DA082_d.tif DS1105-2248DA082_c.tif \
DS1105-2248DA082_b.tif DS1105-2248DA082_a.tif \
-o DS1105-2248DA082.tif \
--ot byte --overlap-width 7000 --blend-radius 2000 \
--rotate
In order to process with the optical bar camera model these images need
to be cropped to remove most of the empty area around the image. The
four corners of the valid image area can be manually found by clicking
on the corners in ``stereo_gui``. Note that for some input images it can
be unclear where the proper location for the corner is due to edge
artifacts in the film. Do your best to select the image corners such
that obvious artifacts are kept out and all reasonable image sections
are kept in.
ASP provides a simple Python tool called ``historical_helper.py`` to rotate the
image so that the top edge is horizontal while also cropping the boundaries.
This tool requires installing the ImageMagick software. See
:numref:`historical_helper` for more details.
Pass in the corner coordinates as shown below in the order top-left, top-right,
bot-right, bot-left (column then row). This is also a good opportunity to
simplify the file names going forwards.
::
historical_helper.py rotate-crop \
--interest-points '4523 1506 114956 1450 114956 9355 4453 9408' \
--input-path DS1105-2248DA082.tif \
--output-path aft.tif
historical_helper.py rotate-crop \
--interest-points '6335 1093 115555 1315 115536 9205 6265 8992' \
--input-path DS1105-2248DF076.tif \
--output-path for.tif
See :numref:`resizing_images` if these steps failed, as perhaps the images
were too large.
Fetching a ground truth DEM
~~~~~~~~~~~~~~~~~~~~~~~~~~~
To create initial cameras to use with these images, and to later refine and
validate the terrain model made from them, we will need a ground truth source.
Several good sets of DEMs exist, including SRTM, ASTER, and TanDEM-X
(:numref:`initial_terrain`). Here we will work with SRTM, which provides DEMs
with a 30-meter grid size. The bounds of the region of interest are inferred
from the tables with meta-information from above.
The SRTM DEM must be adjusted to be relative to the WGS84 datum, as discussed in
:numref:`conv_to_ellipsoid`.
The visualization of all images and DEMs can be done in ``stereo_gui``.
Creating camera files
~~~~~~~~~~~~~~~~~~~~~
ASP provides the tool named ``cam_gen`` (:numref:`cam_gen`) that, based on a
camera's intrinsics and the positions of the image corners on Earth's surface,
will create initial camera models that will be the starting point for aligning
the cameras.
To create optical bar camera models, an example camera model file is
needed. This needs to contain all of the expected values for the camera,
though image_size, image_center, iC, and IR can be any value since they
will be recalculated. The pitch is determined by the resolution of the
scanner used, which is seven microns. The other values are determined by
looking at available information about the satellite. For the first
image (DS1105-2248DF076) the following values were used::
VERSION_4
OPTICAL_BAR
image_size = 13656 1033
image_center = 6828 517
pitch = 7.0e-06
f = 0.61000001430511475
scan_time = 0.5
forward_tilt = 0.2618
iC = -1030862.1946224371 5468503.8842079658 3407902.5154047827
iR = -0.95700845635275322 -0.27527006183758934 0.091439638698163225 -0.26345593052063937 0.69302501329766897 -0.67104940475144637 0.1213498543172795 -0.66629027007731101 -0.73575232847574434
speed = 7700
mean_earth_radius = 6371000
mean_surface_elevation = 4000
motion_compensation_factor = 1.0
scan_dir = right
For a description of each value, see :numref:`panoramic`. For
the other image (aft camera) the forward tilt was set to -0.2618 and
scan_dir was set to 'left'. The correct values for scan_dir (left or
right) and motion_compensation_factor (1.0 or -1.0) are not known for
certain due to uncertainties about how the images were recorded and may
even change between launches of the KH-4 satellite. You will need to
experiment to see which combination of settings produces the best
results for your particular data set.
The metadata table from Earth Explorer has the following entries for
DS1105-2248DF076::
NW Corner Lat dec 31.266
NW Corner Long dec 99.55
NE Corner Lat dec 31.55
NE Corner Long dec 101.866
SE Corner Lat dec 31.416
SE Corner Long dec 101.916
SW Corner Lat dec 31.133
SW Corner Long dec 99.55
These correspond to the upper-left, upper-right, lower-right, and
lower-left pixels in the image. We will invoke ``cam_gen`` as follows::
cam_gen --sample-file sample_kh4b_for_optical_bar.tsai \
--camera-type opticalbar \
--lon-lat-values \
'99.55 31.266 101.866 31.55 101.916 31.416 99.55 31.133' \
for.tif --reference-dem dem.tif --refine-camera -o for.tsai
cam_gen --sample-file sample_kh4b_aft_optical_bar.tsai \
--camera-type opticalbar \
--lon-lat-values \
'99.566 31.266 101.95 31.55 101.933 31.416 99.616 31.15' \
aft.tif --reference-dem dem.tif --refine-camera -o aft.tsai
It is very important to note that if, for example, the upper-left image
corner is in fact the NE corner from the metadata, then that corner
should be the first in the longitude-latitude list when invoking this
tool.
Bundle adjustment and stereo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Before processing the input images it is a good idea to experiment with
reduced resolution copies in order to accelerate testing. You can easily
generate reduced resolution copies of the images using ``stereo_gui`` as
shown below.
::
stereo_gui for.tif aft.tif --create-image-pyramids-only
ln -s for_sub8.tif for_small.tif
ln -s aft_sub8.tif aft_small.tif
cp for.tsai for_small.tsai
cp aft.tsai aft_small.tsai
The new .tsai files need to be adjusted by updating the image_size,
image_center (divide by resolution factor, which is 8 here), and the
pitch (multiply by the resolution factor) to account for the
downsample amount.
You can now run bundle adjustment on the downsampled images::
bundle_adjust for_small.tif aft_small.tif \
for_small.tsai aft_small.tsai \
-t opticalbar \
--max-iterations 100 \
--camera-weight 0 \
--tri-weight 0.1 \
--tri-robust-threshold 0.1 \
--disable-tri-ip-filter \
--skip-rough-homography \
--inline-adjustments \
--ip-detect-method 1 \
--datum WGS84 \
-o ba_small/run
Validation of cameras
~~~~~~~~~~~~~~~~~~~~~
An important sanity check is to mapproject the images with these
cameras, for example as::
mapproject dem.tif for.tif for.tsai for.map.tif
mapproject dem.tif aft.tif aft.tsai aft.map.tif
and then overlay the mapprojected images on top of the DEM in
``stereo_gui``. If it appears that the images were not projected
correctly, or there are gross alignment errors, likely the order of
image corners was incorrect. At this stage it is not unusual that the
mapprojected images are somewhat shifted from where they should be,
that will be corrected later.
This exercise can be done with the small versions of the images and
cameras, and also before and after bundle adjustment.
Running stereo
~~~~~~~~~~~~~~
Stereo with raw images::
parallel_stereo --stereo-algorithm asp_mgm \
for_small.tif aft_small.tif \
ba_small/run-for_small.tsai ba_small/run-aft_small.tsai \
--subpixel-mode 9 \
--alignment-method affineepipolar \
-t opticalbar --skip-rough-homography \
--disable-tri-ip-filter \
--ip-detect-method 1 \
stereo_small_mgm/run
It is strongly suggested to run stereo with *mapprojected images*, per
:numref:`mapproj-example`. Ensure the mapprojected images have the same
resolution, and overlay them on top of the initial DEM first, to check for gross
misalignment.
See :numref:`nextsteps` for a discussion about various speed-vs-quality choices
in stereo.
.. _kh4_align:
DEM generation and alignment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Next, a DEM is created, with an auto-determined UTM or polar
stereographic projection (:numref:`point2dem`)::
point2dem --auto-proj-center \
--tr 30 stereo_small_mgm/run-PC.tif
The grid size (``--tr``) is in meters.
The produced DEM could be rough. It is sufficient however to align to the SRTM
DEM by hillshading the two and finding matching features
(:numref:`pc_hillshade`)::
pc_align --max-displacement -1 \
--initial-transform-from-hillshading similarity \
--save-transformed-source-points \
--num-iterations 0 \
dem.tif stereo_small_mgm/run-DEM.tif \
-o stereo_small_mgm/run
Here one should choose carefully the transform type. The options are
``translation``, ``rigid``, and ``similarity`` (:numref:`pc_align_options`).
The resulting aligned cloud can be regridded as::
point2dem --auto-proj-center \
--tr 30 \
stereo_small_mgm/run-trans_source.tif
Consider examining in ``stereo_gui`` the left and right hillshaded files produced
by ``pc_align`` and the match file among them, to ensure tie points among
the two DEMs were found properly (:numref:`stereo_gui_view_ip`).
There is a chance that this may fail as the two DEMs to align could be too
different. In that case, the two DEMs can be regridded as in :numref:`regrid`,
say with a grid size of 120 meters. The newly obtained coarser SRTM DEM can be
aligned to the coarser DEM from stereo.
The alignment transform could later be refined or applied to the initial clouds
(:numref:`prevtrans`).
Floating the intrinsics
~~~~~~~~~~~~~~~~~~~~~~~
The obtained alignment transform can be used to align the cameras as
well, and then one can experiment with floating the intrinsics.
See :numref:`intrinsics_ground_truth`.
Modeling the camera models as pinhole cameras with RPC distortion
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Once sufficiently good optical bar cameras are produced and the
DEMs from them are reasonably similar to some reference terrain
ground truth, such as SRTM, one may attempt to improve the accuracy
further by modeling these cameras as simple pinhole models with the
nonlinear effects represented as a distortion model given by Rational
Polynomial Coefficients (RPC) of any desired degree (see
:numref:`pinholemodels`). The best fit RPC representation can be
found for both optical bar models, and the RPC can be further
optimized using the reference DEM as a constraint.
To convert from optical bar models to pinhole models with RPC distortion
one does::
convert_pinhole_model for_small.tif for_small.tsai \
-o for_small_rpc.tsai --output-type RPC \
--camera-to-ground-dist 300000 \
--sample-spacing 50 --rpc-degree 2
and the same for the other camera. Here, one has to choose carefully
the camera-to-ground-distance. Above it was set to 300 km.
The obtained cameras should be bundle-adjusted as before. One can
create a DEM and compare it with the one obtained with the earlier
cameras. Likely some shift in the position of the DEM will be present,
but hopefully not too large. The ``pc_align`` tool can be used to make
this DEM aligned to the reference DEM.
Next, one follows the same process as outlined in :numref:`skysat` and
:numref:`floatingintrinsics` to refine the RPC coefficients. It is suggested to
use the ``--heights-from-dem`` option as in that example. Here we use the more
complicated ``--reference-terrain`` option.
We will float the RPC coefficients of the left and right images independently,
as they are unrelated. The initial coefficients must be manually modified to be
at least 1e-7, as otherwise they will not be optimized. In the latest builds
this is done automatically by ``bundle_adjust`` (option ``--min-distortion``).
The command we will use is::
bundle_adjust for_small.tif aft_small.tif \
for_small_rpc.tsai aft_small_rpc.tsai \
-o ba_rpc/run --max-iterations 200 \
--camera-weight 0 --disable-tri-ip-filter \
--skip-rough-homography --inline-adjustments \
--ip-detect-method 1 -t nadirpinhole --datum WGS84 \
--force-reuse-match-files --reference-terrain-weight 1000 \
--parameter-tolerance 1e-12 --max-disp-error 100 \
--disparity-list stereo/run-unaligned-D.tif \
--max-num-reference-points 40000 --reference-terrain srtm.tif \
--solve-intrinsics \
--intrinsics-to-share 'focal_length optical_center' \
--intrinsics-to-float other_intrinsics --robust-threshold 10 \
--initial-transform pc_align/run-transform.txt
Here it is suggested to use a match file with dense interest points
(:numref:`dense_ip`). The initial transform is the transform written by
``pc_align`` applied to the reference terrain and the DEM obtained with the
camera models ``for_small_rpc.tsai`` and ``aft_small_rpc.tsai`` (with the
reference terrain being the first of the two clouds passed to the alignment
program). The unaligned disparity in the disparity list should be from the
stereo run with these initial guess camera models (hence stereo should be used
with the ``--unalign-disparity`` option). It is suggested that the optical
center and focal lengths of the two cameras be kept fixed, as RPC distortion
should be able to model any changes in those quantities as well.
One can also experiment with the option ``--heights-from-dem`` instead
of ``--reference-terrain``. The former seems to be able to handle better
large height differences between the DEM with the initial cameras and
the reference terrain, while the latter is better at refining the
solution.
Then one can create a new DEM from the optimized camera models and see
if it is an improvement.
Another example of using RPC and an illustration is in :numref:`kh7_fig`.
.. _kh7:
Declassified satellite images: KH-7
-----------------------------------
KH-7 was an effective observation satellite that followed the Corona program. It
contained an index (frame) camera and a single strip (pushbroom) camera.
ASP has *no exact camera model for this camera.* An RPC distortion model can be
fit as in :numref:`dem2gcp`. See a figure in :numref:`kh7_fig`.
*This produces an approximate solution, which goes the right way but is likely
not good enough.*
For the latest suggested processing workflow, see the section on KH-9 images
(:numref:`kh9`).
For this example we find the following images in Earth Explorer
declassified collection 2::
DZB00401800038H025001
DZB00401800038H026001
Make note of the latitude/longitude corners of the images listed in Earth Explorer,
and note which image corners correspond to which compass locations.
It is suggested to resize the images to a more manageable size. This can
avoid failures in the processing below (:numref:`resizing_images`).
We will merge the images with the ``image_mosaic`` tool. These images have a
large amount of overlap and we need to manually lower the blend radius so that
we do not have memory problems when merging the images. Note that the image
order is different for each image.
::
image_mosaic DZB00401800038H025001_b.tif DZB00401800038H025001_a.tif \
-o DZB00401800038H025001.tif --ot byte --blend-radius 2000 \
--overlap-width 10000
image_mosaic DZB00401800038H026001_a.tif DZB00401800038H026001_b.tif \
-o DZB00401800038H026001.tif --ot byte --blend-radius 2000 \
--overlap-width 10000
For this image pair we will use the following SRTM images from Earth
Explorer::
n22_e113_1arc_v3.tif
n23_e113_1arc_v3.tif
dem_mosaic n22_e113_1arc_v3.tif n23_e113_1arc_v3.tif -o srtm_dem.tif
The SRTM DEM must be first adjusted to be relative to WGS84
(:numref:`conv_to_ellipsoid`).
Next we crop the input images so they only contain valid image area. We
use, as above, the ``historical_helper.py`` tool. See :numref:`historical_helper`
for how to install the ImageMagick software that it needs.
::
historical_helper.py rotate-crop \
--interest-points '1847 2656 61348 2599 61338 33523 1880 33567' \
--input-path DZB00401800038H025001.tif \
--output-path 5001.tif
historical_helper.py rotate-crop \
--interest-points '566 2678 62421 2683 62290 33596 465 33595' \
--input-path DZB00401800038H026001.tif \
--output-path 6001.tif
We will try to approximate the KH-7 camera using a pinhole model. The
pitch of the image is determined by the scanner, which is 7.0e-06 meters
per pixel. The focal length of the camera is reported to be 1.96 meters,
and we will set the optical center at the center of the image. We need
to convert the optical center to units of meters, which means
multiplying the pixel coordinates by the pitch to get units of meters.
Using the image corner coordinates which we recorded earlier, use the
``cam_gen`` tool (:numref:`cam_gen`) to generate camera models for each image,
being careful of the order of coordinates.
::
cam_gen --pixel-pitch 7.0e-06 --focal-length 1.96 \
--optical-center 0.2082535 0.1082305 \
--lon-lat-values '113.25 22.882 113.315 23.315 113.6 23.282 113.532 22.85' \
5001.tif --reference-dem srtm_dem.tif --refine-camera -o 5001.tsai
cam_gen --pixel-pitch 7.0e-06 --focal-length 1.96 \
--optical-center 0.216853 0.108227 \
--lon-lat-values '113.2 22.95 113.265 23.382 113.565 23.35 113.482 22.915' \
6001.tif --reference-dem srtm_dem.tif --refine-camera -o 6001.tsai
A quick way to evaluate the camera models is to use the
``camera_footprint`` tool to create KML footprint files, then look at
them in Google Earth. For a more detailed view, you can mapproject them
and overlay them on the reference DEM in ``stereo_gui``.
::
camera_footprint 5001.tif 5001.tsai --datum WGS_1984 --quick \
--output-kml 5001_footprint.kml -t nadirpinhole --dem-file srtm_dem.tif
camera_footprint 6001.tif 6001.tsai --datum WGS_1984 --quick \
--output-kml 6001_footprint.kml -t nadirpinhole --dem-file srtm_dem.tif
The output files from ``cam_gen`` will be roughly accurate but they may
still be bad enough that ``bundle_adjust`` has trouble finding a
solution. One way to improve your initial models is to use ground
control points.
For this test case it was possible to match features along the rivers to the
same rivers in a hillshaded version of the reference DEM. Three sets of GCPs
(:numref:`bagcp`) were created, one for each image individually and a joint set for
both images. Then ``bundle_adjust`` was run individually for each camera using
the GCPs.
::
bundle_adjust 5001.tif 5001.tsai gcp_5001.gcp \
-t nadirpinhole --inline-adjustments \
--num-passes 1 --camera-weight 0 \
--ip-detect-method 1 -o bundle_5001/out \
--max-iterations 30 --fix-gcp-xyz
bundle_adjust 6001.tif 6001.tsai gcp_6001.gcp \
-t nadirpinhole --inline-adjustments \
--num-passes 1 --camera-weight 0 \
--ip-detect-method 1 -o bundle_6001/out \
--max-iterations 30 --fix-gcp-xyz
Check the GCP pixel residuals at the end of the produced residual file
(:numref:`ba_err_per_point`).
At this point it is a good idea to experiment with lower-resolution copies of
the input images before running processing with the full size images. You can
generate these using ``stereo_gui``
::
stereo_gui 5001.tif 6001.tif --create-image-pyramids-only
ln -s 5001_sub16.tif 5001_small.tif
ln -s 6001_sub16.tif 6001_small.tif
Make copies of the camera files for the smaller images::
cp 5001.tsai 5001_small.tsai
cp 6001.tsai 6001_small.tsai
Multiply the pitch in the produced cameras by the resolution scale factor.
Now we can run ``bundle_adjust`` and ``parallel_stereo``. If you are using the
GCPs from earlier, the pixel values will need to be scaled to match the
subsampling applied to the input images.
::
bundle_adjust 5001_small.tif 6001_small.tif \
bundle_5001/out-5001_small.tsai \
bundle_6001/out-6001_small.tsai \
gcp_small.gcp -t nadirpinhole -o bundle_small_new/out \
--force-reuse-match-files --max-iterations 30 \
--camera-weight 0 --disable-tri-ip-filter \
--skip-rough-homography \
--inline-adjustments --ip-detect-method 1 \
--datum WGS84 --num-passes 2
parallel_stereo --alignment-method homography \
--skip-rough-homography --disable-tri-ip-filter \
--ip-detect-method 1 --session-type nadirpinhole \
--stereo-algorithm asp_mgm --subpixel-mode 9 \
5001_small.tif 6001_small.tif \
bundle_small_new/out-out-5001_small.tsai \
bundle_small_new/out-out-6001_small.tsai \
st_small_new/out
A DEM is created with ``point2dem`` (:numref:`point2dem`)::
point2dem --auto-proj-center st_small_new/out-PC.tif
The above may produce a DEM with many holes. It is strongly suggested to run
stereo with *mapprojected images* (:numref:`mapproj-example`). Use the ``asp_mgm``
algorithm. See also :numref:`nextsteps` for a discussion about various
speed-vs-quality choices in stereo.
.. figure:: ../images/kh7_dem.png
:name: kh7_fig
An example of a DEM created from KH-7 images after modeling distortion with RPC
of degree 3 (within the green polygon), on top of a reference terrain. GCP were used (:numref:`dem2gcp`), as well as mapprojected images and the ``asp_mgm``
algorithm.
Fitting an RPC model to the cameras with the help of GCP created by the
``dem2gcp`` program (:numref:`dem2gcp`) can greatly help improve the produced
DEM. See an illustration in :numref:`kh7_fig`, and difference maps in
:numref:`kh7_orig_vs_opt`.
.. _kh9:
Declassified satellite images: KH-9
-----------------------------------
The KH-9 satellite contained one frame camera and two panoramic cameras,
one pitched forward and one aft.
The frame camera is a regular pinhole model (:numref:`pinholemodels`).
The images produced with it could be handled as for KH-7 (:numref:`kh7`),
SkySat (:numref:`skysat`), or using Structure-from-Motion (:numref:`sfm`).
This example describes how to process the KH-9 panoramic camera images.
The workflow below is more recent than the one for KH-4B (:numref:`kh4`)
or KH-7, and it requires the latest build (:numref:`release`).
*The ASP support for panoramic images is highly experimental and is work in
progress.*
Image mosaicking
~~~~~~~~~~~~~~~~
For this example we use the following images from the Earth Explorer
declassified collection 3::
D3C1216-200548A041
D3C1216-200548F040
Make note of the latitude/longitude corners of the images listed in Earth
Explorer and corresponding raw image corners.
It is suggested to resize the images to a more manageable size, such as 1/16th
the original image resolution, at least to start with
(:numref:`resizing_images`). This can avoid failures with ImageMagick in the
processing below when the images are very large.
We merge the images with ``image_mosaic`` (:numref:`image_mosaic`)::
image_mosaic \
D3C1216-200548F040_a.tif D3C1216-200548F040_b.tif \
D3C1216-200548F040_c.tif D3C1216-200548F040_d.tif \
D3C1216-200548F040_e.tif D3C1216-200548F040_f.tif \
D3C1216-200548F040_g.tif D3C1216-200548F040_h.tif \
D3C1216-200548F040_i.tif D3C1216-200548F040_j.tif \
D3C1216-200548F040_k.tif D3C1216-200548F040_l.tif \
--ot byte --overlap-width 3000 \
-o D3C1216-200548F040.tif
image_mosaic \
D3C1216-200548A041_a.tif D3C1216-200548A041_b.tif \
D3C1216-200548A041_c.tif D3C1216-200548A041_d.tif \
D3C1216-200548A041_e.tif D3C1216-200548A041_f.tif \
D3C1216-200548A041_g.tif D3C1216-200548A041_h.tif \
D3C1216-200548A041_i.tif D3C1216-200548A041_j.tif \
D3C1216-200548A041_k.tif --overlap-width 1000 \
--ot byte -o D3C1216-200548A041.tif --rotate
These images also need to be cropped to remove most of the area around
the images::
historical_helper.py rotate-crop \
--input-path D3C1216-200548F040.tif \
--output-path for.tif \
--interest-points '2414 1190 346001 1714
345952 23960 2356 23174'
historical_helper.py rotate-crop \
--input-path D3C1216-200548A041.tif \
--output-path aft.tif \
--interest-points '1624 1333 346183 1812
346212 24085 1538 23504'
We used, as above, the ``historical_helper.py`` tool. See
:numref:`historical_helper` for how to install the ImageMagick software that it
needs.
Reference terrain
~~~~~~~~~~~~~~~~~
Fetch a reference DEM for the given site (:numref:`initial_terrain`). It
should be converted to be relative to the WGS84 datum
(:numref:`conv_to_ellipsoid`) and to a local UTM projection with ``gdalwarp``
with bicubic interpolation (:numref:`gdal_tools`). We will call this terrain
``ref.tif``. This terrain will help with registration later.
For the purpose of mapprojection, the terrain should be blurred to attenuate any
misalignment (:numref:`dem_prep`). The blurred version of this will be called
``ref_blur.tif``.
.. _ghuffar_method:
Modeling the cameras
~~~~~~~~~~~~~~~~~~~~
We follow the approach in :cite:`ghuffar2022pipeline`. This work
makes the following additional improvements as compared to the prior
efforts in :numref:`kh4`:
- The satellite velocity is a 3D vector, which is solved for independently,
rather than being tied to satellite pose and camera tilt.
- It is not assumed that the satellite pose is fixed during scanning. Rather,
there are two camera poses, for the starting and ending scan times, with
*slerp* interpolation in between.
- The scan time and scalar speed are absorbed into the motion compensation factor.
- The forward tilt is not modeled, hence only the camera pose is taken into
account, rather than the satellite pose and its relation to the camera pose.
Sample camera format
~~~~~~~~~~~~~~~~~~~~
It is strongly advised to work at 1/16th resolution of the original images, as
the images are very large. Later, any optimized cameras can be adjusted to be at
a different resolution, as documented in :numref:`resizing_images`.
At 1/16th the resolution, a sample Panoramic (OpticalBar) camera file, before
refinements of intrinsics and extrinsics, has the form::
VERSION_4
OPTICAL_BAR
image_size = 21599 1363
image_center = 10799.5 681.5
pitch = 0.000112
f = 1.52
scan_time = 1
forward_tilt = 0
iC = 0 0 0
iR = 1 0 0 0 1 0 0 0 1
speed = 0
mean_earth_radius = 6371000
mean_surface_elevation = 0
motion_compensation_factor = 0
scan_dir = right
velocity = 0 0 0
We call this file ``sample_sub16.tsai``.
There are several notable differences with the optical bar models before the
workflow and modeling was updated (:numref:`ghuffar_method`). Compared to the
sample file in :numref:`panoramic`, the scan time, forward tilt, speed, mean
surface elevation, and motion compensation factor are set to nominal values.
The additional ``velocity`` field is present, which for now has zero values. If
this field is not set, the prior optical bar logic will be invoked. Hence
internally both implementations are still supported.
The ``iR`` matrix has the starting camera pose. If the ending camera pose is not
provided, it is assumed to be the same as the starting one. When an optical bar
model is saved, the ending camera pose will be added as a line of the form::
final_pose = 0.66695010211673467 2.3625446924332656 1.5976801601116621
This represents a rotation in the axis-angle format, unlike ``iR`` which is
shown in regular matrix notation. The discrepancy in notation is for backward
compatibility.
The focal length is 1.52 m, per existing documentation. The pixel pitch (at
which the film is scanned) is known to be 7e-6 m. Here it is multiplied by 16 to
account for the fact that we work at 1/16th the resolution of the original
images.
The image size comes from the image file (at the current lower resolution). The
image center is set to half the image size.
Creation of initial cameras
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Camera files are generated using ``cam_gen`` (:numref:`cam_gen`), with the help
of the sample file from above. Let the Fwd image at 1/16th resolution be
called ``fwd_sub16.tif``. The command is::
cam_gen \
--sample-file sample_sub16.tsai \
--camera-type opticalbar \
--lon-lat-values \
'-151.954 61.999 -145.237 61.186
-145.298 60.944 -152.149 61.771' \
fwd_sub16.tif \
--reference-dem ref.tif \
--refine-camera \
--gcp-file fwd_sub16.gcp \
-o fwd_sub16.tsai
This creates the camera file ``fwd_sub16.tsai`` and the GCP file
``fwd_sub16.gcp``.
The historical images are often cropped after being scanned, and the image size
and optical center (image center) will be different for each image. The command
above will write the correct image size in the output file, and will set the
optical center to half of that. Hence, the entries for these in the sample file
will be ignored.
An analogous command is run for the Aft camera.
The longitude-latitude corners must correspond to the expected traversal of
the raw (non-mapprojected) image corners (:numref:`cam_gen_pinhole`). This
requires some care, especially given that the Fwd and Aft images have 180
degrees of rotation between them.
Validation of guessed cameras
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The produced cameras should be verified by mapprojection (:numref:`mapproject`)::
mapproject \
--tr 12 \
ref_blur.tif \
fwd_sub16.tif \
fwd_sub16.tsai \
fwd_sub16.map.tif
The grid size (``--tr``) is in meters. Here, it was known from existing information
that the ground sample distance at full resolution was about 0.75 m / pixel.
This was multiplied by 16 given the lower resolution used here. If not known,
the grid size can be auto-guessed by this program.
The Fwd and Aft mapprojected images should be overlaid with georeference
information on top of the reference DEM. It is expected that the general
position and orientation would be good, but there would be a lot of warping due
to unknown intrinsics.
.. figure:: ../images/kh9_initial_cameras.png
:name: kh9_init_fig
An example of Fwd and Aft mapprojected images. Notable warping is observed.
Optimization of cameras
~~~~~~~~~~~~~~~~~~~~~~~
We will follow the bundle adjustment approach outlined in
:numref:`heights_from_dem`.
The quantities to be optimized are the extrinsics (camera position and starting
orientation), and the intrinsics, which include the image center (optical
offset), focal length, motion compensation factor, velocity vector, and the ending
orientation. The last three fall under the ``other_intrinsics`` category in
bundle adjustment. The command can be as follows::
bundle_adjust \
fwd_sub16.tif aft_sub16.tif \
fwd_sub16.tsai aft_sub16.tsai \
--mapprojected-data \
'fwd_sub16.map.tif aft_sub16.map.tif' \
fwd_sub16.gcp aft_sub16.gcp \
--inline-adjustments \
--solve-intrinsics \
--intrinsics-to-float other_intrinsics \
--intrinsics-to-share none \
--heights-from-dem ref.tif \
--heights-from-dem-uncertainty 10000 \
--ip-per-image 100000 \
--ip-inlier-factor 1000 \
--remove-outliers-params '75 3 1000 1000' \
--num-iterations 250 \
-o ba/run
We passed in the GCP files produced earlier, that have information about the
ground coordinates of image corners. We made use of mapprojected images for
interest point matching (:numref:`mapip`).
The values for ``--heights-from-dem-uncertainty``, ``--ip-per-image``,
``--ip-inlier-factor``, and ``--remove-outliers-params`` are much larger than
usual, because of the very large distortion seen above. Otherwise too many valid
interest points may be eliminated. Later these parameters can be tightened.
Check the produced clean match files with ``stereo_gui``
(:numref:`stereo_gui_pairwise_matches`). It is very important to have many of
them in the corners and roughly uniformly distributed across the images. One
could also consider adding the ``--ip-per-tile`` and ``--matches-per-tile``
parameters (:numref:`ba_ip`). These would need some tuning at each resolution to
ensure the number of matches is not overly large.
The updated cameras will be saved in the output directory. These should be
validated by mapprojection as before.
We did not optimize for now the focal length and optical center, as they
were known more reliably than the other intrinsics. All these can be optimized
together in a subsequent pass.
See :numref:`bundle_adjust` for more information about ``bundle_adjust`` and
the various report files that are produced.
.. figure:: ../images/kh9_opt_cameras.png
:name: kh9_opt_fig
Fwd and Aft mapprojected images, after optimization of intrinsics. The images
are much better aligned.
Creation of a terrain model
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Inspect the mapprojected images created with the new cameras. They will likely
be more consistent than before. Look at the convergence angles report
(:numref:`ba_conv_angle`). Hopefully this angle will have a reasonable value,
such as between 10 and 40 degrees.
Run stereo and DEM creation with the mapprojected images and the ``asp_mgm``
algorithm. It is suggested to follow very closely the steps in
:numref:`mapproj-example`.
Fixing horizontal registration errors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It is quite likely that the mapprojected images after the last bundle adjustment
are much improved, but the stereo terrain model still shows systematic issues
relative to the reference terrain.
Then, the ``dem2gcp`` program (:numref:`dem2gcp`) can be invoked to create GCP
that can fix this misregistration. Pass to this program the option
``--max-disp`` if the disparity that is an input to that tool is not accurate in
flat areas.
Bundle adjustment can happen with these dense GCP, while optimizing all
intrinsics and extrinsics. We will share none of the intrinsics (the optical
center, at least, must be unique for each individual image due to how they are
scanned and cropped). Afterwards, a new stereo DEM can be created as before.
If happy enough with results at a given resolution, the cameras can be rescaled
to a finer resolution and the process continued. See :numref:`resizing_images`
for how a camera model can be modified to work at a different resolution.
Fixing local warping
~~~~~~~~~~~~~~~~~~~~
The panoramic (OpticalBar) camera model we worked with may not have enough
degrees of freedom to fix issues with local warping that arise during the
storage of the film having the historical images or its subsequent digitization.
To address that, the cameras can be converted to CSM linescan format (and the
images rotated by 90 degrees). See :numref:`opticalbar2csm`.
Then, the jitter solver (:numref:`jitter_solve`) can be employed. It is
suggested to set ``--num-lines-per-position`` and
``--num-lines-per-orientation`` for this program so that there exist about 10-40
position and orientation samples along the scan direction.
This program can also accept GCP files, just like ``bundle_adjust``.
We invoked it as follows::
jitter_solve \
fwd_sub16.tif aft_sub16.tif \
fwd_sub16.json aft_sub16.json \
fwd_sub16.gcp aft_sub16.gcp \
--match-files-prefix ba/run \
--num-lines-per-position 1000 \
--num-lines-per-orientation 1000 \
--heights-from-dem ref.tif \
--heights-from-dem-uncertainty 500 \
--max-initial-reprojection-error 100 \
--num-iterations 100 \
-o jitter_sub16/run
The GCP had a sigma of about 100, which is notably less uncertainty than the
value of ``--heights-from-dem-uncertainty``. At higher resolution,
and if confident in the GCP, one can reduce the GCP uncertainty further.
In practice we found that after one pass of the jitter solver and stereo DEM
creation, it may be needed to run GCP creation with ``dem2gcp`` and bundle
adjustment again to refine all the intrinsics, including focal length and lens
distortion, this time with the CSM linescan model. Then, one can invoke
``jitter_solve`` one more time. Each step should offer a further improvement in
results.
For fine-level control over interest point matches, dense matches from disparity
are suggested (:numref:`dense_ip`).
If the satellite acquired several overlapping pairs of images in quick
succession, it is suggested to use them together, as that can improve the
registration.
The linescan cameras are not as easy to convert to a different resolution as the
OpticalBar cameras. An `experimental program `_
for this is available.
================================================
FILE: docs/examples/hrsc.rst
================================================
.. _hrsc_example:
Mars Express High Resolution Stereo Camera (HRSC)
-------------------------------------------------
The HRSC camera on the Mars Express satellite is a complicated system,
consisting of multiple channels pointed in different directions plus
another super resolution channel. The best option to create DEMs is to
use the two dedicated stereo channels. These are pointed ahead of and
behind the nadir channel and collect a stereo observation in a single
pass of the satellite.
Since each observation contains both stereo channels, one observation is
sufficient to create a DEM.
Data can be downloaded from the `HRSC node `_ in the Planetary Data System (PDS).
HRSC data is organized into categories. Level 2 is radiometrically
corrected, level 3 is corrected and mapprojected onto MOLA, and level 4
is corrected and mapprojected onto a DEM created from the HRSC data.
You should use the level 2 data for creating DEMs with ASP. If you would
like to download one of the already created DEMs, it may be easiest to
use the areoid referenced version (.da4 extension) since that is
consistent with MOLA.
Preparing the data
~~~~~~~~~~~~~~~~~~
Fetch the two stereo channels using ``wget`` from::
https://pds-geosciences.wustl.edu/mex/mex-m-hrsc-3-rdr-v4/mexhrs_4000/data/1995/h1995_0000_s13.img
https://pds-geosciences.wustl.edu/mex/mex-m-hrsc-3-rdr-v4/mexhrs_4000/data/1995/h1995_0000_s23.img
.. figure:: ../images/examples/hrsc/hrsc_example.png
:name: hrsc_figure
Sample outputs from a cropped region of HRSC frame 1995. Left: Cropped input.
Center: Block matching with subpixel mode 3. Right: MGM algorithm with cost
mode 3.
See :numref:`planetary_images` for how to set up ISIS and download the needed
kernels. For HRSC, they are part of the ``mex`` dataset.
It appears that ``hrsc2isis`` is not able to read the level 3 images that were
downloaded above, and PDS no longer offers level 2 images. What seems to work is
to edit the .img files and change level 3 to level 2. That can be done with::
perl -pi -e 's#(PROCESSING_LEVEL_ID\s+=) 3#$1 2#g' *.img
Then run::
hrsc2isis from=h1995_0000_s13.img to=h1995_0000_s13.cub
hrsc2isis from=h1995_0000_s23.img to=h1995_0000_s23.cub
spiceinit from=h1995_0000_s13.cub ckpredicted=true
spiceinit from=h1995_0000_s23.cub ckpredicted=true
Here we added the ``ckpredicted=true`` flag to ``spiceinit``. Adding
``web=true`` can help avoid downloading the kernels, if this works. See the
(`spiceinit documentation `_).
Running stereo
~~~~~~~~~~~~~~
HRSC images are large and may have compression artifacts so you should
experiment running stereo on a small region with ``stereo_gui``
(:numref:`stereo_gui`).
The suggested command to run on the full images is::
parallel_stereo h1995_0000_s13.cub h1995_0000_s23.cub \
--stereo-algorithm asp_mgm \
--cost-mode 3 \
mgm/out
See :numref:`nextsteps` for other stereo algorithms, and information on
tradeoffs between them.
A DEM is created with ``point2dem`` (:numref:`point2dem`)::
point2dem \
--stereographic --auto-proj-center \
mgm/out-PC.tif
================================================
FILE: docs/examples/isis_minirf.rst
================================================
.. _isis_minirf:
LRO Mini-RF using ISIS camera models
------------------------------------
See :numref:`csm_minirf`. That example uses CSM cameras. Running it
with ISIS ``.cub`` cameras amounts to replacing ``.json`` with
``.cub`` in all commands.
================================================
FILE: docs/examples/junocam.rst
================================================
.. _junocam:
JunoCam
-------
JunoCam is an optical camera on the Juno spacecraft. It has been taking images
of Jupiter and its satellites since 2016.
This example shows how to produce terrain models and ortho images from JunoCam
images for Ganymede, the largest moon of Jupiter. These will be registered
to the Voyager-Galileo global mosaic of Ganymede.
Fetching the data
~~~~~~~~~~~~~~~~~
Set (in bash)::
url=https://planetarydata.jpl.nasa.gov/img/data/juno/JNOJNC_0018/DATA/RDR/JUPITER/ORBIT_34/
Download the .IMG and .LBL files for two observations::
for f in JNCR_2021158_34C00001_V01 JNCR_2021158_34C00002_V01; do
for ext in .IMG .LBL; do
wget $url/$f$ext
done
done
Preparing the data
~~~~~~~~~~~~~~~~~~
Ensure ISIS is installed (:numref:`planetary_images`).
Create ISIS .cub files::
for f in JNCR_2021158_34C00001_V01 JNCR_2021158_34C00002_V01; do
junocam2isis fr = $f.LBL to = $f.cub fullccd = yes
done
This will result in many files, because JunoCam acquires multiple overlapping images
in quick succession.
Run ``spiceinit`` to get the camera pointing and other information::
for f in *.cub; do
spiceinit from = $f web = true
done
If the ``web = true`` option does not work, the ISIS data for the ``juno`` mission
needs to be downloaded, per the ISIS documentation.
We will put all these files into an ``img`` subdirectory.
.. figure:: ../images/junocam_raw.png
JunoCam images JNCR_2021158_34C00001_V01_0012 and
JNCR_2021158_34C00002_V01_0010 as shown by ``stereo_gui``
(:numref:`stereo_gui`). The shared area and a couple of matching craters are
highlighted.
A JunoCam image is made of 3 framelets, each about 128 pixels tall. The image
width is 1648 pixels. Consecutive images have overlapping areas, which helps
eliminate the effect of the gaps between the framelets.
External reference
~~~~~~~~~~~~~~~~~~
We will pixel-level register the JunoCam images to the 1 km / pixel Ganymede `Voyager -
Galileo global mosaic
`_.
Crop from it a portion that covers our area of interest as::
win="5745386 2069139 7893530 36002"
gdal_translate -projwin $win \
Ganymede_Voyager_GalileoSSI_global_mosaic_1km.tif \
galileo_ortho.tif
This will be stored in a subdirectory named ``ref``.
Initial DEM
~~~~~~~~~~~
Both image registration and stereo DEM creation benefit from mapprojecting the
JunoCam images onto a prior low-resolution DEM. A reasonably good such DEM can
be created by considering the surface zero height corresponding to the earlier
orthoimage clip.
Given that the elevations on Ganymede are on the order of 1 km, which is about
the image resolution, this works well enough. Such a DEM is produced with
``image_calc`` (:numref:`image_calc`) as::
image_calc -c "var_0 * 0" \
--output-nodata-value -1e+6 \
-d float32 \
ref/galileo_ortho.tif \
-o ref/flat_dem.tif
Image selection
~~~~~~~~~~~~~~~
We chose to focus on the highest resolution JunoCam images of Ganymede, as that
resulted in the best terrain model. The quality of the terrain model degraded
towards the limb, as expected. We worked with the images::
JNCR_2021158_34C00001_V01_0009
JNCR_2021158_34C00001_V01_0010
JNCR_2021158_34C00001_V01_0011
JNCR_2021158_34C00001_V01_0012
JNCR_2021158_34C00001_V01_0013
and corresponding images that have ``34C00002`` in their name.
Mapprojection
~~~~~~~~~~~~~
Each image was mapprojected (:numref:`mapproject`) at 1 km / pixel resolution,
with a command such as::
mapproject --tr 1000 \
ref/flat_dem.tif \
img/JNCR_2021158_34C00001_V01_0010.cub \
map/JNCR_2021158_34C00001_V01_0010.map.tif
GCP creation
~~~~~~~~~~~~
We will create GCP that ties each JunoCam image to the reference
Voyager-Galileo image mosaic with ``gcp_gen`` (:numref:`gcp_gen`),
invoked as follows::
f=JNCR_2021158_34C00001_V01_0010
gcp_gen \
--ip-detect-method 0 \
--inlier-threshold 50 \
--ip-per-image 20000 \
--individually-normalize \
--camera-image img/${f}.cub \
--mapproj-image map/${f}.map.tif \
--ortho-image ref/galileo_ortho.tif \
--dem ref/flat_dem.tif \
--gcp-sigma 1000 \
--output-prefix gcp/run \
--output-gcp gcp/${f}.gcp
We set ``--gcp-sigma 1000``, which is rather high, as we do not know the exact
DEM that was employed to produce the reference image mosaic. The option ``--individually-normalize`` was essential, as these images come from different
sources.
.. figure:: ../images/junocam_galileo_matches.png
Interest point matches between mapprojected image
JNCR_2021158_34C00001_V01_0010 and the Voyager-Galileo mosaic. GCP are made
from these matches.
Bundle adjustment
~~~~~~~~~~~~~~~~~
Bundle adjustment (:numref:`parallel_bundle_adjust`) was run with the 10 images
selected earlier and the GCP files::
parallel_bundle_adjust \
img/JNCR_2021158_34C0000[1-2]_V01_000[9-9].cub \
img/JNCR_2021158_34C0000[1-2]_V01_001[0-3].cub \
--ip-per-image 20000 \
--num-iterations 500 \
gcp/*.gcp \
-o ba/run
Stereo terrain creation
~~~~~~~~~~~~~~~~~~~~~~~
We ran ``parallel_stereo`` on *every combination* of overlapping images between
the ``34C00001`` set and the ``34C00002`` one::
l=JNCR_2021158_34C00001_V01_xxxx
r=JNCR_2021158_34C00002_V01_yyyy
pref=stereo_map/${l}_${r}/run
parallel_stereo \
map/$l.map.tif map/$r.map.tif \
img/$l.cub img/$r.cub \
--bundle-adjust-prefix ba/run \
--ip-per-image 20000 \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
--subpixel-kernel 7 7 \
--nodata-value 0 \
--num-matches-from-disparity 10000 \
${pref} \
ref/flat_dem.tif
Here we used a small ``subpixel-kernel`` of 7 x 7 pixels, to ensure as little as
possible is eroded from the already narrow images. Note that the ``asp_mgm``
algorithm default ``corr-kernel`` value is 5 x 5 pixels
(:numref:`stereodefault`).
The stereo convergence angle (:numref:`stereo_pairs`) was about 35 degrees, which
is very good.
Set the output projection (the same as in the reference image mosaic)::
proj='+proj=eqc +lat_ts=0 +lat_0=0 +lon_0=180 +x_0=0 +y_0=0 +R=2632344.9707 +units=m +no_defs'
Then, ``point2dem`` (:numref:`point2dem`) was run::
point2dem \
--t_srs "$proj" \
--tr 1000 \
${pref}-PC.tif \
--orthoimage \
${pref}-L.tif
This was followed by mosaicking of DEMs and orthoimages with ``dem_mosaic``
(:numref:`dem_mosaic`), and colorization with ``colormap`` (:numref:`colormap`).
.. figure:: ../images/junocam_dem_drg.png
Left: Mosaicked DEM created from stereo of JunoCam images. The color range
corresponds to elevations between -1500 and 1500 meters. Right: produced
JunoCam orthoimage overlaid on top of the Voyager-Galileo global mosaic.
The results of this processing are shown in the figure above. Three things are notable:
- The image registration is pixel-level.
- There are some seams at the top and bottom. Those can be eliminated
with more images.
- There are systematic artifacts in the elevations.
The latter issue is likely due to not well-modeled distortion and TDI effects,
given the JunoCam camera design. This will be fixed in the next section.
Handling gaps between DEMs
~~~~~~~~~~~~~~~~~~~~~~~~~~
The JunoCam images have very little overlap and sometimes that results in gaps
between the DEMs produced from stereo. These are most notable around the first
and last DEMs in any given sequence. So adding more overlapping stereo
pairs at the beginning and end of the sequence will help cover those areas.
Consider decreasing the grid size for mapprojection as well, say from 1000 m to
800 m, which should result in more pixels in overlapping areas.
These parameter changes in the workflow above may also help:
- ``--edge-buffer-size 1`` or ``0`` (the default is larger)
- ``--subpixel-kernel 5 5`` (instead of ``7 7``)
- ``--corr-kernel 3 3`` (the default is ``5 5`` for ``asp_mgm``)
These parameters are described in :numref:`stereodefault`.
Intrinsics refinement
~~~~~~~~~~~~~~~~~~~~~
.. _junocam_opt:
.. figure:: ../images/junocam_dem_opt.png
Left: The earlier mosaicked DEM created from stereo of JunoCam images. Right:
the produced DEM after optimizing the lens distortion with a DEM constraint.
These are plotted with the same range of elevations (-1500 to 1500 meters).
The systematic artifacts are much less pronounced.
To address the systematic elevation artifacts, we will refine the intrinsics and
extrinsics of the cameras, while using the zero elevation DEM as a ground
constraint (with an uncertainty).
The approach in :numref:`ba_frame_linescan` is followed.
We will make use of dense matches from disparity, as in :numref:`dense_ip`. The
option for that, ``--num-matches-from-disparity``, was already set in the stereo
runs above. We got good results with sparse matches too, as produced by
``bundle_adjust``, if there are a lot of them, but dense matches offer more
control over the coverage.
These matches will augment existing sparse matches in the ``ba`` directory. For
that, first the sparse matches will be copied to a new directory, called
``dense_matches``. Then, we will copy on top the small number of dense matches
from each stereo directory above, while removing the string ``-disp`` from each
such file name, and ensuring each corresponding sparse match file is overwritten.
It is necessary to create CSM cameras (:numref:`csm`) for the JunoCam images, to
be able to optimize the intrinsics. For the first camera, that is done with the
``cam_gen`` program (:numref:`cam_gen`), with a command such as::
cam_gen img/JNCR_2021158_34C00001_V01_0010.cub \
--input-camera img/JNCR_2021158_34C00001_V01_0010.cub \
--reference-dem ref/flat_dem.tif \
--focal-length 1480.5905405405405405 \
--optical-center 814.21 600.0 \
--pixel-pitch 1 \
--refine-camera \
--refine-intrinsics distortion \
-o csm/JNCR_2021158_34C00001_V01_0010.json
The values for the focal length (in pixels) and optical center (in pixels) were
obtained by peeking in the .cub file metadata.
The resulting lens distortion model is not the one for JunoCam, which has two
distortion parameters, but rather the OpenCV radial-tangential model with five
parameters (:numref:`csm_frame_def`). To use the JunoCam lens distortion model,
adjust the value of ``--distortion-type`` in ``cam_gen`` above.
The ``cam_test`` program (:numref:`cam_test`) can help validate that the camera
is converted well.
The intrinsics of this camera are transferred without further optimization
to the other cameras as::
sample=csm/JNCR_2021158_34C00001_V01_0010.json
for f in \
img/JNCR_2021158_34C0000[1-2]_V01_0009.cub \
img/JNCR_2021158_34C0000[1-2]_V01_001[0-3].cub \
; do
g=${f/.cub/.json}
g=csm/$(basename $g)
cam_gen $f \
--input-camera $f \
--sample-file $sample \
--reference-dem ref/flat_dem.tif \
--pixel-pitch 1 \
--refine-camera \
--refine-intrinsics none \
-o $g
done
Next, bundle adjustment is run, with the previously optimized adjustments that
reflect the registration to the reference Voyager-Galileo mosaic::
bundle_adjust \
img/JNCR_2021158_34C0000[1-2]_V01_0009.cub \
img/JNCR_2021158_34C0000[1-2]_V01_001[0-3].cub \
csm/JNCR_2021158_34C0000[1-2]_V01_0009.json \
csm/JNCR_2021158_34C0000[1-2]_V01_001[0-3].json \
--input-adjustments-prefix ba/run \
--match-files-prefix dense_matches/run \
--num-iterations 50 \
--solve-intrinsics \
--intrinsics-to-float all \
--intrinsics-to-share all \
--heights-from-dem ref/flat_dem.tif \
--heights-from-dem-uncertainty 5000 \
gcp/*.gcp \
-o ba_rfne/run
Lastly, stereo is run with the optimized model state camera files
(:numref:`csm_state`) saved in ``ba_rfne``.
The result is in :numref:`junocam_opt`.
It was found that better DEMs are produced by re-mapprojecting with latest
cameras and re-running stereo from scratch, rather than reusing stereo runs with
the option ``--prev-run-prefix`` (:numref:`parallel_stereo`). Likely that is
because the cameras change in non-small ways.
With ISIS 9.0.0 and later, a CSM file produced as above can be embedded in
the .cub file to be used with ISIS (:numref:`embedded_csm`).
================================================
FILE: docs/examples/k10.rst
================================================
.. _k10example:
K10
---
K10 is an Earth-based research rover within the Intelligent Robotics
Group at NASA Ames, the group ASP developers belong to. The cameras on
this rover use a simple Pinhole model. The use of ASP with these cameras
is illustrated in the ``examples/K10`` directory (just type 'make'
there). Just as for the MER dataset (:numref:`mer-example`),
only the creation of a point cloud is supported.
See :numref:`examples` for other examples.
================================================
FILE: docs/examples/kaguya.rst
================================================
.. _kaguya_tc:
Kaguya Terrain Camera
---------------------
The Kaguya Terrain Camera (TC) is a push-broom imager, with a spatial resolution
of 10 m. The images are acquired from a 100 km altitude above the Moon. It was
part of the JAXA `Kaguya `_ orbiter.
Kaguya TC has two sensors, named TC1 and TC2, forming a stereo pair. They see
roughly the same region on the ground, with a convergence angle of about 30 degrees
(:numref:`stereo_pairs`). These sensors may have slightly different focal
lengths and distortion coefficients.
Fetching the data
~~~~~~~~~~~~~~~~~
Visit the `product search
`_ page, and
enter a small search region.
Fetch the raw data sets for a desired stereo pair, starting with the *TC1* and
*TC2* prefixes (not the DEM or other products). Both the .img and .lbl files are needed.
::
wget https://darts.isas.jaxa.jp/pub/pds3/sln-l-tc-3-w-level2b0-v1.0/20080605/data/TC1W2B0_01_02936N034E0938.img.gz
wget https://darts.isas.jaxa.jp/pub/pds3/sln-l-tc-3-w-level2b0-v1.0/20080605/data/TC1W2B0_01_02936N034E0938.lbl
wget https://darts.isas.jaxa.jp/pub/pds3/sln-l-tc-3-w-level2b0-v1.0/20080605/data/TC2W2B0_01_02936N036E0938.img.gz
wget https://darts.isas.jaxa.jp/pub/pds3/sln-l-tc-3-w-level2b0-v1.0/20080605/data/TC2W2B0_01_02936N036E0938.lbl
Preparing the data
~~~~~~~~~~~~~~~~~~
Unzip the .img.gz files with ``gunzip``.
Ensure that ISIS is installed, and that ISISROOT and ISISDATA are set, per
:numref:`planetary_images`. The Kaguya kernels can then be downloaded with the
command::
$ISISROOT/bin/downloadIsisData kaguya $ISISDATA
For each image, run commands along the lines of::
$ISISROOT/bin/kaguyatc2isis \
from=TC1W2B0_01_02936N034E0938.lbl \
to=TC1W2B0_01_02936N034E0938.cub \
setnullrange=NO sethrsrange=NO sethisrange=NO \
setlrsrange=NO setlisrange=NO
$ISISROOT/bin/spiceinit from=TC1W2B0_01_02936N034E0938.cub \
web=false attach=TRUE cksmithed=FALSE ckrecon=TRUE \
ckpredicted=FALSE cknadir=FALSE spksmithed=true \
spkrecon=TRUE spkpredicted=FALSE shape=SYSTEM \
startpad=0.0 endpad=0.0
Create CSM cameras (:numref:`csm`)::
$ISISROOT/bin/isd_generate --only_naif_spice \
TC1W2B0_01_02936N034E0938.cub \
-k TC1W2B0_01_02936N034E0938.cub
Bundle adjustment and stereo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Run bundle adjustment (:numref:`bundle_adjust`) and stereo
(:numref:`parallel_stereo`)::
bundle_adjust \
TC1W2B0_01_02936N034E0938.cub TC2W2B0_01_02936N036E0938.cub \
TC1W2B0_01_02936N034E0938.json TC2W2B0_01_02936N036E0938.json \
--tri-weight 0.1 --camera-weight 0.0 \
-o ba/run
parallel_stereo --stereo-algorithm asp_mgm --subpixel-mode 9 \
TC1W2B0_01_02936N034E0938.cub TC2W2B0_01_02936N036E0938.cub \
TC1W2B0_01_02936N034E0938.json TC2W2B0_01_02936N036E0938.json \
--bundle-adjust-prefix ba/run \
stereo/run
For datasets with very oblique illumination, ``--subpixel-mode 2``
(:numref:`subpixel_options`) worked better, but is much slower.
Run ``point2dem`` (:numref:`point2dem`) to get a DEM. Consider using the
stereographic projection centered at the region of interest::
point2dem --stereographic --auto-proj-center \
--tr 10 stereo/run-PC.tif
See :numref:`sfs_kaguya_example` for a clip of the produced DEM.
It is suggested to rerun stereo with mapprojected images
(:numref:`mapproj-example`), to get a higher quality output.
See :numref:`nextsteps` for a discussion about various speed-vs-quality choices
when running stereo.
Alignment
~~~~~~~~~
The produced DEM can be aligned with ``pc_align`` (:numref:`pc_align`) to the
LOLA RDR product.
.. _sfs_kaguya:
Shape-from-shading with Kaguya TC
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here it will be illustrated how to run Shape-from-Shading (:numref:`sfs`) on
Kaguya TC images. An overview of SfS and examples for other planets are given in
:numref:`sfs_usage`.
First, ensure that the data are fetched and a stereo terrain is created, per
:numref:`kaguya_tc`. Shape-from-shading expects a DEM with no holes which is
also rather smooth. It should be at the same ground resolution as the input
images, which in this case is 10 meters per pixel. It is best to have it in a
local projection, such as stereographic.
We will modify the DEM creation command from above to use a large search radius to fill
any holes::
point2dem --stereographic --auto-proj-center \
--tr 10 --search-radius-factor 10 \
stereo/run-PC.tif
(adjust the projection center for your location).
Inspect the produced DEM ``stereo/run-DEM.tif`` in ``stereo_gui`` in hillshading
mode. Any additional holes can be filled with ``dem_mosaic``
(:numref:`dem_mosaic_extrapolate`).
It is also suggested to blur it a little, to make it smoother::
dem_mosaic --dem-blur-sigma 2 stereo/run-filled-dem.tif \
-o stereo/run-blurred-dem.tif
Then crop a region with ``gdal_translate`` that has no missing data.
Mapproject (:numref:`mapproject`) onto this DEM the left and right images with
the corresponding ``.json`` camera files, while using the adjustments in
``ba/run``. Overlay the resulting georeferenced images in ``stereo_gui``. This
is a very important sanity check to ensure that the cameras are registered
correctly.
Run SfS as::
parallel_sfs -i stereo/run-cropped-dem.tif \
TC1W2B0_01_02936N034E0938.cub \
TC2W2B0_01_02936N036E0938.cub \
TC1W2B0_01_02936N034E0938.json \
TC2W2B0_01_02936N036E0938.json \
--bundle-adjust-prefix ba/run \
--reflectance-type 1 \
--blending-dist 10 \
--min-blend-size 50 \
--allow-borderline-data \
--threads 4 \
--save-sparingly \
--crop-input-images \
--smoothness-weight 40000 \
--initial-dem-constraint-weight 10 \
--max-iterations 5 \
--shadow-thresholds "120 120" \
--tile-size 200 \
--padding 50 \
--processes 10 \
-o sfs/run
If there are artifacts in the produced DEM, increase the smoothness weight.
But if it is too large, it may blur the produced DEM too much.
The initial and final DEM can be inspected in ``stereo_gui``. The ``geodiff``
(:numref:`geodiff`) tool can be used to compare how much the DEM changed.
The initial DEM constraint was set rather high to ensure the DEM does not change
much as a result of SfS. The shadow threshold depends on the pixel values and can
be very different for other images.
See, for comparison, the parameter choices made for LRO NAC
(:numref:`sfs-lola`). That example, and that entire chapter, also has the most
detailed discussion for how to run SfS, including the essential role of
alignment.
.. figure:: ../images/sfs_kaguya_example.png
:name: sfs_kaguya_example
:alt: SfS with Kaguya TC images
From left to right: the stereo DEM, SfS DEM (hillshaded), and a mapprojected
image. Some numerical noise is still seen, which can be removed by increasing
the smoothing weight. See below for another example.
Using multiple images with diverse illumination results in more detail and fewer
artifacts. For such data, bundle adjustment and pairwise stereo need to be run
first, and the produced DEMs and cameras must be aligned to a common reference,
such as LOLA (:numref:`ba_pc_align`). Then the aligned DEMs are inspected and
merged with ``dem_mosaic``, a clip is selected, holes are filled, noise is
blurred, and SfS is run. The process is explained in detail in
:numref:`sfs-lola`.
Here is an example of running SfS with the datasets::
TC{1,2}W2B0_01_02921S050E1100
TC{1,2}W2B0_01_05936S048E1097
All four images were used, though likely the first of each pair would have
been sufficient, given that images in each pair have the same illumination.
.. figure:: ../images/sfs_kaguya_dems.png
:name: sfs_kaguya_dems
:alt: SfS with Kaguya TC images with different illumination
SfS with Kaguya images with different illumination. From left to right: first
pair stereo DEM, second pair stereo DEM, and the SfS DEM (all hillshaded). It
can be seen that SfS adds more detail and removes numerical noise.
.. figure:: ../images/sfs_kaguya_ortho.png
:name: sfs_kaguya_ortho
:alt: SfS Kaguya TC ortho images
The images used for SfS (one from each pair). The Sun is in the East and West,
respectively.
Refining the camera intrinsics for Kaguya TC
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See :numref:`kaguya_ba`.
.. _jitter_kaguya:
Solving for jitter for Kaguya TC
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Kaguya TC cameras exhibit some jitter, but its effect is not as strong as that of
lens distortion, which needs to be solved for first, as in :numref:`kaguya_ba`.
Then, jitter can be corrected as for CTX in :numref:`jitter_ctx`. The precise commands
are below.
.. figure:: ../images/kaguya_jitter.png
:name: jitter_kaguya_fig
:alt: Jitter for Kaguya TC
First row: the stereo DEM and orthoimage. Second row: The difference of
stereo DEM to LOLA. Third row: the triangulation error
(:numref:`triangulation_error`). These are before (left) and after (right)
solving for jitter. The ranges in the colorbar are in meters.
Here we worked with the stereo pair::
TC1W2B0_01_05324N054E2169
TC2W2B0_01_05324N056E2169
Stereo was run with mapprojected images (:numref:`mapproj-example`).
Dense matches were produced from stereo disparity (:numref:`dense_ip`).
Having on the order of 20,000 dense matches is suggested.
The DEM and cameras were aligned to LOLA, and lens distortion was solved
for as in :numref:`kaguya_ba` (using additional overlapping images).
The resulting optimized cameras were passed in to the jitter solver.
The DEM to constrain against was produced from LOLA, with a command as::
point2dem \
-r moon \
--stereographic \
--auto-proj-center \
--csv-format 2:lon,3:lat,4:radius_km \
--search-radius-factor 5 \
--tr 25 \
lola.csv
Solving for jitter::
jitter_solve \
TC1W2B0_01_05324N054E2169.cub \
TC2W2B0_01_05324N056E2169.cub \
ba/run-TC1W2B0_01_05324N054E2169.adjusted_state.json \
ba/run-TC2W2B0_01_05324N056E2169.adjusted_state.json \
--max-pairwise-matches 20000 \
--num-lines-per-position 300 \
--num-lines-per-orientation 300 \
--max-initial-reprojection-error 20 \
--match-files-prefix dense_matches/run \
--heights-from-dem lola-DEM.tif \
--heights-from-dem-uncertainty 10 \
--anchor-dem lola-DEM.tif \
--num-anchor-points 5000 \
--num-anchor-points-extra-lines 1000 \
--anchor-weight 0.01 \
--num-iterations 20 \
-o jitter/run
The value of ``--anchor-weight`` can be increased to 0.1 - 0.5, if oscillations
are seen at the starting and ending image lines.
================================================
FILE: docs/examples/lronac.rst
================================================
.. _lronac-example:
Lunar Reconnaissance Orbiter (LRO) NAC
--------------------------------------
This section will describe in detail how to process an LRO NAC dataset.
(See also :numref:`lronac_csm` for a ready-made example using a different
dataset for which all inputs have already been prepared.)
The site
~~~~~~~~
In this example we will consider a stereo pair that covers the
Taurus-Littrow valley on the Moon where, on December 11, 1972, the
astronauts of Apollo 17 landed. However, this stereo pair does not
contain the landing site. It is slightly west; focusing on the
Lee-Lincoln scarp that is on North Massif. The scarp is an 80 m high
feature that is the only visible sign of a deep fault.
.. figure:: ../images/examples/lrocna/lroc-na-example2_combined.png
Example output possible with a LROC NA stereo pair, using both
CCDs from each observation, courtesy of the lronac2mosaic.py tool.
LRO NAC camera design
~~~~~~~~~~~~~~~~~~~~~
LRO has two Narrow Angle Cameras (NAC), with both acquiring image data
at the same time, so each observation consists
of two images, left and right, denoted with ``L`` and ``R``.
These are not meant to be used as a stereo pair, as the camera
center is virtually in the same place for both, and they have very little
overlap. For stereo one needs two such observations, with a
reasonable perspective difference (baseline) among the two.
Then stereo can happen by pairing an L or R image from the first
observation with an L or R image from the second. Alternatively, each
observation's L and R images can be stitched first, then stereo happens
between the two stitched images. Both of these approaches will be
discussed below.
Download
~~~~~~~~
Download the experimental data records (EDR) for observations
M104318871 and M104311715 from http://wms.lroc.asu.edu/lroc/search.
Alternatively, search by original IDs of 2DB8 and 4C86 in the
PDS.
The download will result in four files, named M104318871LE.img,
M104318871RE.img, M104311715LE.img, and M104311715RE.img.
.. _lro_nac_no_stitch:
Preparing the inputs without stitching
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The individual ``L`` and ``R`` images in an LRO NAC observation can be
used without stitching if desired to keep the original camera models.
Unstitched cameras can also be converted to CSM (:numref:`csm`), which
will provide a great speed up for stereo, bundle adjustment, and
Shape-from-Shading (:numref:`sfs_usage`).
We convert each .img file to an ISIS .cub camera image, initialize the
SPICE kernels, and perform radiometric calibration and echo
correction. Here are the steps, illustrated on one image::
f=M104318871LE
    lronac2isis from = ${f}.img to = ${f}.cub
spiceinit from = ${f}.cub
lronaccal from = ${f}.cub to = ${f}.cal.cub
lronacecho from = ${f}.cal.cub to = ${f}.cal.echo.cub
Note that for these commands to succeed, ISIS and its supporting data
must be downloaded, per :numref:`planetary_images`.
Stitching the LE and RE observations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In this case all ISIS preprocessing of the EDRs is performed via the
``lronac2mosaic.py`` command (:numref:`lronac2mosaic`)::
lronac2mosaic.py M104318871LE.img M104318871RE.img
lronac2mosaic.py M104311715LE.img M104311715RE.img
This runs ``lronac2isis``, ``lronaccal``, ``lronacecho``,
``spiceinit``, ``noproj``, and ``handmos`` to create a stitched
unprojected image for each observation. In this example we don't
mapproject the images as ASP can usually get good results. More
aggressive terrain might require an additional ``cam2map4stereo.py``
step.
In case of failure, it is suggested to re-run this tool with the option
``--keep`` to keep the intermediate files, and then inspect the ones before
the final step.
Running stereo
~~~~~~~~~~~~~~
Stereo can then be run either with unstitched or stitched .cub files.
Here's an example with the unstitched LE images::
parallel_stereo M104318871LE.cub M104311715LE.cub \
--alignment-method affineepipolar \
run/run
Create a DEM, orthoimage, and error image with ``point2dem``
(:numref:`point2dem`)::
point2dem --stereographic --auto-proj-center \
--errorimage --orthoimage \
run/run-PC.tif run/run-L.tif
Check the stereo convergence angle as printed during preprocessing
(:numref:`stereo_pairs`). That angle is often too small for LRO NAC,
and then the results are not going to be great.
Check the triangulation error (:numref:`triangulation_error`) in the produced
error image. This may suggest that bundle adjustment may be needed
(:numref:`bundle_adjust`).
See :numref:`nextsteps` for a discussion about various stereo
speed-vs-quality choices. Consider using mapprojection
(:numref:`mapproj-example`) for best results for steep slopes.
It is strongly suggested to convert the cameras to CSM
(:numref:`csm_linescan`). This makes mapprojection faster,
and also can help solve for jitter (:numref:`jitter_solve`).
.. _lronac_align:
Validation and alignment
~~~~~~~~~~~~~~~~~~~~~~~~
A `LOLA `_ point
cloud can be downloaded for the area of interest. Download the RDR
``PointPerRow`` product.
The produced DEM can be aligned to LOLA with ``pc_align`` (:numref:`pc_align`),
with a command as::
pc_align --max-displacement 500 \
--csv-format 2:lon,3:lat,4:radius_km \
--save-inv-transformed-reference-points \
run/run-DEM.tif LOLA.csv \
-o run/run-align
Solving for jitter
~~~~~~~~~~~~~~~~~~
LRO NAC images can have jitter. A process as for CTX in :numref:`jitter_ctx`
usually works well to remove it (see also a multi-image example in
:numref:`jitter_multiple_images`).
================================================
FILE: docs/examples/mer.rst
================================================
.. _mer-example:
Mars Exploration Rovers
-----------------------
The Mars Exploration Rovers (MER) have several cameras onboard that acquire
stereo pairs. The images come with CAHVOR camera models, that have local
positions and orientations.
ASP can create point clouds and textured meshes from these cameras. DEMs cannot
be created right away, unlike for MSL, which has geolocation information
(:numref:`csm_msl`), but an SfM solution is likely to work as in
:numref:`rig_msl`.
PANCAM, NAVCAM, HAZCAM
~~~~~~~~~~~~~~~~~~~~~~
These are cameras on the rover and are all processed the same way. It is
preferred to use NAVCAM images, as those have less distortion than the HAZCAM.
.. figure:: ../images/examples/mer/mer_mesh.png
Left input image and produced textured mesh.
Recipe
^^^^^^
Download the data from the `PDS Image Atlas `_.
::
wget https://planetarydata.jpl.nasa.gov/img/data/mer/mer2no_0xxx/data/sol0766/edr/2n194370551effap00p0675l0m1.img
wget https://planetarydata.jpl.nasa.gov/img/data/mer/mer2no_0xxx/data/sol0766/edr/2n194370551effap00p0675r0m1.img
Create the CAHVOR cameras::
mer2camera 2n194370551effap00p0675l0m1.img
mer2camera 2n194370551effap00p0675r0m1.img
Run stereo (:numref:`parallel_stereo`) and create a mesh with ``point2mesh`` (:numref:`point2mesh`)::
parallel_stereo \
2n194370551effap00p0675l0m1.img \
2n194370551effap00p0675r0m1.img \
2n194370551effap00p0675l0m1.cahvor \
2n194370551effap00p0675r0m1.cahvor \
run/run
point2mesh -s 2 --texture-step-size 2 \
run/run-PC.tif run/run-L.tif
See :numref:`nextsteps` for a discussion about various speed-vs-quality choices.
It is suggested to filter out points that are not triangulated well because they
are too far using the ``parallel_stereo`` option ``--min-triangulation-angle``
(:numref:`stereodefault`).
For finer-grained control, one can use::
--universe-center camera --near-universe-radius 0.7 \
--far-universe-radius 80.0
These may be suggested as well::
--alignment-method epipolar --force-use-entire-range
================================================
FILE: docs/examples/moc.rst
================================================
.. _moc_example:
Mars Global Surveyor MOC-NA
---------------------------
In the Stereo Pipeline Tutorial in :numref:`moc_tutorial`, we showed
you how to process a narrow angle MOC stereo pair that covered a
portion of Hrad Vallis. In this section we will show you more
examples, some of which exhibit a problem common to stereo pairs from
linescan imagers: ``spacecraft jitter``. Jitter is caused by oscillations of
the spacecraft due to the movement of other spacecraft hardware. All
spacecraft wobble around to some degree but some are particularly
susceptible.
Jitter causes wave-like distortions along the track of the satellite
orbit in DEMs produced from linescan camera images. This effect can be
very subtle or quite pronounced, so it is important to check your data
products carefully for any sign of this type of artifact. The following
examples will show the typical distortions created by this problem.
Note that the science teams of HiRISE and LROC are actively working on
detecting and correctly modeling jitter in their respective SPICE data.
If they succeed in this, the distortions will still be present in the
raw images, but the jitter will no longer produce ripple artifacts in
the DEMs produced using ours or other stereo reconstruction software.
ASP has its own jitter solver, which was shown to reduce the jitter
for CTX (Mars) and DigitalGlobe (Earth) data (:numref:`jitter_solve`).
Ceraunius Tholus
~~~~~~~~~~~~~~~~
Ceraunius Tholus is a volcano in northern Tharsis on Mars. It can be
found at 23.96 N and 262.60 E. This DEM crosses the volcano's caldera.
.. figure:: ../images/examples/mocna/ceraunius_tholus_mocna_ge_combined.png
:name: mocna_ceraunius_example
Example output for MOC-NA of Ceraunius Tholus. Notice the presence
of severe washboarding artifacts due to spacecraft jitter.
.. _commands-2:
Commands
^^^^^^^^
Download the M08/06047 and R07/01361 images from the PDS:
::
wget -O M0806047.imq \
https://planetarydata.jpl.nasa.gov/img/data/mgs-m-moc-na_wa-2-sdp-l0-v1.0/mgsc_1068/m08060/m0806047.imq
wget -O R0701361.imq \
https://planetarydata.jpl.nasa.gov/img/data/mgs-m-moc-na_wa-2-sdp-l0-v1.0/mgsc_1270/r07013/r0701361.imq
Then process:
::
ISIS> moc2isis f=M0806047.imq t=M0806047.cub
ISIS> moc2isis f=R0701361.imq t=R0701361.cub
ISIS> spiceinit from=M0806047.cub
ISIS> spiceinit from=R0701361.cub
ISIS> cam2map4stereo.py M0806047.cub R0701361.cub
ISIS> parallel_stereo M0806047.map.cub R0701361.map.cub result/output
See :numref:`nextsteps` for a discussion about various
speed-vs-quality choices. See :numref:`examples` for other examples.
================================================
FILE: docs/examples/msl.rst
================================================
.. _rig_msl:
MSL navcam example
------------------
This is an example of using the ASP tools to process images taken by the Mars
Science Laboratory (MSL) rover `Curiosity
`_. See :numref:`rig_examples` for
other examples.
This approach uses the images to create a self-consistent solution, which can
be registered to the ground (:numref:`msl_registration`).
:numref:`csm_msl` discusses using the known camera poses for MSL.
.. _rig_msl_figure:
Illustration
~~~~~~~~~~~~
.. figure:: ../images/msl_kimberly_mesh.png
:name: rig_msl_figure1
:alt: MSL Kimberly mesh
.. figure:: ../images/msl_kimberly_photo.png
:name: rig_msl_figure2
:alt: MSL Kimberly photo
A mesh created with 22 MSL navcam images acquired on SOL 597 (top),
and several representative images from this set (bottom).
Sensor information
~~~~~~~~~~~~~~~~~~
Curiosity has two navcam sensors (left and right) mounted on a stereo
rig. Each records images at a resolution of 1024 x 1024 pixels. The
field of view is 45 degrees.
.. _msl_challenges:
Challenges
~~~~~~~~~~
The navcam images are used to plan the path of the rover. They are not acquired
specifically for mapping. While there is good overlap and perspective difference
between images that are taken at the same time with the stereo rig, these
assumptions may not hold for images produced at different times. Moreover, after
the rover changes position, there is usually a large perspective difference and
little overlap with earlier images.
Prior work
~~~~~~~~~~
A very useful reference on processing MSL images is :cite:`caravaca20203d`. It
uses the commercial Agisoft Photoscan software. To help with matching the
images, that paper uses the global position and orientation of each image and
projects these onto the ground. Such data is not fully present in the .LBL files
in PDS, as those contain only local coordinates, and would necessitate querying
the SPICE database. It also incorporates lower-resolution "TRAV" images to tie
the data together.
.. _msl_image_prep:
Data preparation
~~~~~~~~~~~~~~~~
The images are fetched from PDS. For example, to get the data for day
(SOL) 597 on Mars, use the command::
dir=data/msl/MSLNAV_0XXX/DATA/SOL00597
wget -r -nH --cut-dirs=4 --no-parent \
--reject="index.html*" \
https://pds-imaging.jpl.nasa.gov/$dir \
--include $dir
This will create the directory ``SOL00597`` containing .IMG data files
and .LBL metadata. Using the ISIS ``pds2isis`` program (see
:numref:`planetary_images` for installation), these can be converted to
.cub files as::
pds2isis from = SOL00597/image.LBL to = SOL00597/image.cub
A .cub file obtained with the left navcam sensor will have a name like::
SOL00597/NLB_451023553EDR_F0310946NCAM00492M1.cub
while for the right sensor the prefix will be instead ``NRB``. The
full-resolution images have ``_F`` as part of their name, as above.
We will convert the .cub files to the TIF format so that they can be understood
by ``theia_sfm``. The ``rig_calibrator`` convention will be used, of storing
each sensor's data in its own subdirectory (:numref:`rig_data_conv`). We will
name the left and right navcam sensors ``lnav`` and ``rnav``. Then, the
conversion commands are along the lines of::
mkdir -p SOL00597/lnav
isis2std from = SOL00597/left_image.cub \
to = SOL00597/lnav/left_image.tif
Each produced image will have a timestamp, with the same value for the
left and right navcam images taken at the same time.
Image selection
~~~~~~~~~~~~~~~
A subset of 22 images was selected for SOL 597 (half for each of the
left and right navcam sensors). Images were chosen based on visual
inspection. A fully automatic approach may be challenging
(:numref:`msl_challenges`).
This dataset is available for
`download `_.
.. _msl_init_rig:
Setting up the initial rig
~~~~~~~~~~~~~~~~~~~~~~~~~~
Given the earlier sensor information, the focal length can be found
using the formula:
.. math::
f = \frac{w}{2\tan(\frac{\theta}{2})}
where :math:`w` is sensor width in pixels and :math:`\theta` is the field of
view. The focal length is then about 1236.0773 pixels. We will start
by assuming that the optical center is at the image center, and
no distortion. Hence, the initial rig configuration (:numref:`rig_config`)
will look like::
ref_sensor_name: lnav
sensor_name: lnav
focal_length: 1236.0773
optical_center: 512 512
distortion_coeffs:
distortion_type: no_distortion
image_size: 1024 1024
distorted_crop_size: 1024 1024
undistorted_image_size: 1024 1024
ref_to_sensor_transform: 1 0 0 0 1 0 0 0 1 0 0 0
depth_to_image_transform: 1 0 0 0 1 0 0 0 1 0 0 0
ref_to_sensor_timestamp_offset: 0
with an additional identical block for the ``rnav`` sensor (without
``ref_sensor_name``).
SfM map creation
~~~~~~~~~~~~~~~~
Given the data and rig configuration, the image names in .tif format
were put in a list, with one entry per line. The ``theia_sfm``
program (:numref:`theia_sfm`) was run to find initial camera poses::
theia_sfm \
--rig-config rig_config.txt \
--image-list list.txt \
--out-dir theia_rig
Next, ``rig_calibrator`` (:numref:`rig_calibrator`) is used, to
enforce the rig constraint between the left and right navcam sensors
and refine the intrinsics::
params="focal_length,optical_center"
float="lnav:${params} rnav:${params}"
rig_calibrator \
--rig-config rig_config.txt \
--nvm theia_rig/cameras.nvm \
--camera-poses-to-float "lnav rnav" \
--intrinsics-to-float "$float" \
--num-iterations 100 \
--num-passes 2 \
--num-overlaps 5 \
--out-dir rig_out
To optimize the distortion, one can adjust the rig configuration by setting
initial distortion values and type::
distortion_coeffs: 1e-8 1e-8 1e-8 1e-8 1e-8
distortion_type: radtan
and then defining the list of parameters to optimize as::
params="focal_length,optical_center,distortion"
For this example, plausible solutions were obtained with and without
using distortion modeling, but likely for creation of pixel-level
registered textured meshes handling distortion is important.
The produced pairwise matches in ``rig_out/cameras.nvm`` can be
inspected with ``stereo_gui`` (:numref:`stereo_gui_nvm`).
Mesh creation
~~~~~~~~~~~~~
Here, a point cloud is created from every stereo pair consisting of a left
sensor image and corresponding right image, and those are fused into
a mesh. Some parameters are set up first.
Stereo options (:numref:`stereodefault`)::
stereo_opts="
--stereo-algorithm asp_mgm
--alignment-method affineepipolar
--ip-per-image 10000
--min-triangulation-angle 0.1
--global-alignment-threshold 5
--session nadirpinhole
--no-datum
--corr-seed-mode 1
--max-disp-spread 300
--ip-inlier-factor 0.4
--nodata-value 0"
Point cloud filter options (:numref:`pc_filter`)::
maxDistanceFromCamera=100.0
pc_filter_opts="
--max-camera-ray-to-surface-normal-angle 85
--max-valid-triangulation-error 10.0
--max-distance-from-camera $maxDistanceFromCamera
--blending-dist 50 --blending-power 1"
Mesh generation options (:numref:`voxblox_mesh`)::
mesh_gen_opts="
--min_ray_length 0.1
--max_ray_length $maxDistanceFromCamera
--voxel_size 0.05"
Set up the pairs to run stereo on::
outDir=stereo
mkdir -p ${outDir}
grep lnav list.txt > ${outDir}/left.txt
grep rnav list.txt > ${outDir}/right.txt
The optimized rig, in ``rig_out/rig_config.txt``, and optimized
cameras, in ``rig_out/cameras.txt``, are passed to ``multi_stereo``
(:numref:`multi_stereo`)::
multi_stereo \
--rig_config rig_out/rig_config.txt \
--camera_poses rig_out/cameras.txt \
--undistorted_crop_win '1100 1100' \
--rig_sensor "lnav rnav" \
--first_step stereo \
--last_step mesh_gen \
--stereo_options "$stereo_opts" \
--pc_filter_options "$pc_filter_opts" \
--mesh_gen_options "$mesh_gen_opts" \
--left ${outDir}/left.txt \
--right ${outDir}/right.txt \
--out_dir ${outDir}
This created::
${outDir}/lnav_rnav/fused_mesh.ply
See the produced mesh in :numref:`rig_msl_figure`.
.. _msl_registration:
Ground registration
~~~~~~~~~~~~~~~~~~~
To create DEMs, for example for rover cameras, the cameras should be registered
to the ground. We will discuss how to do that both when a prior DEM is available
and when not. For registration to a local Cartesian coordinate system, see
instead :numref:`rig_calibrator_registration`.
Invocation of bundle adjustment
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``rig_calibrator`` option ``--save_pinhole_cameras`` can export the camera
poses to Pinhole format (:numref:`pinholemodels`). It will also save the list of
input images (:numref:`rc_ba`).
These can be ingested by ASP's bundle adjustment program
(:numref:`bundle_adjust`). It can transform the cameras to ground coordinates
using ground control points (GCP, :numref:`bagcp`), with the option
``--transform-cameras-with-shared-gcp``.
Here is an example invocation::
bundle_adjust \
--image-list rig_out/image_list.txt \
--camera-list rig_out/camera_list.txt \
--nvm rig_out/cameras.nvm \
--num-iterations 0 \
--inline-adjustments \
--datum D_MARS \
--remove-outliers-params "75 3 50 50" \
--transform-cameras-with-shared-gcp \
gcp1.gcp gcp2.gcp gcp3.gcp \
-o ba/run
The ``--datum`` option is very important, and it should be set depending
on the planetary body.
Using zero iterations will only apply the registration transform, and
*will preserve* the rig structure, up to a scale factor.
With a positive number of iterations, the cameras will be further refined
in bundle adjustment, while using the GCP. For such refinement it is important
to have many interest point matches between the images. This will *not preserve*
the rig structure.
We used high values in ``--remove-outliers-params`` to avoid removing valid
features in the images if there is unmodeled distortion.
See :numref:`ba_err_per_point` for a report file that measures reprojection errors,
including for GCP. It is very important to examine those. They should be less
than a few dozen pixels, and ideally less.
With the cameras correctly registered and self-consistent, dense stereo point
clouds and DEMs can be created (:numref:`nextsteps`), that can be mosaicked
(:numref:`dem_mosaic`) and aligned to a prior dataset (:numref:`pc_align`).
For difficult areas with few interest points matches, the images (with cameras
now in planetary coordinates) can be mapprojected, and the resulting images can
be used to find many more interest points (:numref:`mapip`).
Use of registered data with rig_calibrator
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The ``bundle_adjust`` program will produce the file ``ba/run.nvm`` having
the registered camera positions and the control network. This can be passed
back to ``rig_calibrator``, if needed, together with the latest optimized rig,
which is at ``rig_out/rig_config.txt``. The command is::
rig_calibrator \
--rig-config rig_out/rig_config.txt \
--nvm ba/run.nvm \
--camera-poses-to-float "lnav rnav" \
--intrinsics-to-float "$float" \
--num-iterations 100 \
--num-passes 2 \
--num-overlaps 0 \
--out-dir rig_out_reg
Here we set ``--num-overlaps 0`` as we do not want to try to create interest
point matches again.
GCP and custom DEM creation
^^^^^^^^^^^^^^^^^^^^^^^^^^^
GCP files can be created manually by point-and-click in ``stereo_gui``
(:numref:`creatinggcp`) or automatically (:numref:`gcp_gen`), if a prior DEM
and/or orthoimage are available.
If no prior DEM is available, it is possible to tie several features in the
images to made-up ground points. For example, consider a ground box with given
width and height, in meters, such as 10 x 4 meters. Create a CSV file named
``ground.csv`` of the form::
# x (meters) y(meters) height (meters)
0 0 0
10 0 0
10 4 0
0 4 0
This can be made into a DEM with ``point2dem`` (:numref:`point2dem`)::
proj="+proj=stere +lat_0=0 +lat_ts=0 +lon_0=0 +k=1 +x_0=0 +y_0=0 +a=3396190 +b=3396190 +units=m +no_defs"
format="1:northing,2:easting,3:height_above_datum"
point2dem \
--datum D_MARS \
--csv-format "$format" \
--csv-srs "$proj" \
--t_srs "$proj" \
--tr 0.1 \
--search-radius-factor 0.5 \
ground.csv
Ensure the correct planet radii and datum are used. The projection can be
auto-determined (:numref:`point2dem_proj`).
Then, following the procedure :numref:`creatinggcp`, features can be picked in
the images and tied to some of the corners of this box, creating GCP files,
which are then used as earlier.
Multiple subsets of the images can be used, with each producing a GCP file.
All can then be passed together to ``bundle_adjust``.
.. _msl_validation:
Validation of registered DEMs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Solving for both intrinsics and camera poses is a complex nonlinear problem,
that may have more than one solution. It is strongly suggested to compare the
produced individual DEMs (after alignment, :numref:`pc_align`) to a trusted DEM,
even if at a lower resolution.
In case of horizontal misalignment, it is suggested to individually align the
produced DEMs to the prior DEM, apply the alignment transform to the cameras
(:numref:`ba_pc_align`), then redo the bundle-adjustment with the aligned
cameras and the prior DEM as a constraint (:numref:`kaguya_ba`), while refining
the intrinsics. It is suggested to use a value of ``--heights-from-dem-uncertainty``
maybe as low as 0.1 or 0.01, if desired to fit tightly to the prior DEM. This may come,
however, at the cost of internal consistency.
The triangulation error for each DEM (:numref:`point2dem`) can help evaluate the
accuracy of the intrinsics. The ``geodiff`` program (:numref:`geodiff`), can be
used to assess the vertical agreement between DEMs.
For cases when the ASP-produced DEMs have remaining strong differences with the
prior DEM, use the ``dem2gcp`` program (:numref:`dem2gcp`) to create GCPs to help
correct this. One GCP file can be produced for each stereo pair, and then all
can be passed to ``bundle_adjust``.
.. figure:: ../images/viper_ground_truth_stereo_dem.png
:name: viper_ground_truth_stereo_dem
:alt: VIPER ground truth stereo DEM
A DEM measured with a point cloud scanner (top) and a mosaicked DEM produced
with ASP (bottom), that was carefully validated with the measured DEM. Data
acquired in the `Regolith Testbed
`_
at NASA Ames. The noise in the upper-left corner is due to an occluding light
source. Other sources of noise are because of shadows. The ``asp_bm`` algorithm was
used (:numref:`stereo_alg_overview`), which is one of the older algorithms in ASP.
Notes
~~~~~
- The voxel size for binning and meshing the point cloud was chosen
manually. An automated approach for choosing a representative voxel
size is to be implemented.
- The ``multi_stereo`` program does not use the interest points found
during SfM map construction. That would likely result in a good
speedup. It also does not run the stereo pairs in parallel.
================================================
FILE: docs/examples/orbital_rig.rst
================================================
.. _orbital_rig:
Orbital rig
-----------
This example shows how to produce synthetic images and cameras that model an
orbital rig with two frame camera sensors, and how to use the
:ref:`rig_calibrator` tool to refine the rig parameters, camera intrinsics, and
camera poses. This example demonstrates the new DEM height constraint feature
that constrains triangulated points to a reference DEM for improved accuracy.
Input DEM and orthoimage
~~~~~~~~~~~~~~~~~~~~~~~~
The synthetic data for this example is created with :ref:`sat_sim`, which
requires a DEM and an orthoimage of a region of interest as input.
To prepare this data, we obtained the free ASTER dataset::
AST_L1A_00404012022185436_20250920182851.hdf
around the San Luis Reservoir in Northern California. A DEM was created following
the workflow in :numref:`aster`. The orthoimage was produced by invoking
:ref:`mapproject` at a nominal resolution of 15 m/pixel. This resolution
reflects the ground sample distance (GSD) of the ASTER images.
Synthetic images
~~~~~~~~~~~~~~~~
The synthetic images and cameras were created with ``sat_sim``, which can
simulate an orbital rig (:numref:`sat_sim_rig`).
The rig was designed with left and right frame cameras, named ``left`` and
``right``. The initial rig configuration was created as described in
:numref:`msl_init_rig`. The sensor dimensions were set to 1000 x 1000
pixels, with the principal point at the image center. No lens distortion
was assumed.
The satellite height was set to 700 km, and the focal length to 35000 pixels.
This results in an estimated GSD of about 20 meters (GSD is approximately
the satellite height divided by the focal length). These parameters are
consistent with the ASTER instrument.
The ``sat_sim`` command for the nadir images was::
sat_sim \
--dem aster_dem.tif \
--ortho aster_ortho.tif \
--rig-config aster_rig.txt \
--rig-sensor-ground-offsets \
-0.01,0,-4000,0,0.01,0,4000,0 \
--first 1300 1200 700000 \
--last 1300 1500 700000 \
--first-ground-pos 1300 1200 \
--last-ground-pos 1300 1500 \
--roll 0 --pitch 0 --yaw 0 \
--num 3 \
--velocity 7500 \
-o sat_sim/run-nadir
The ``--rig-sensor-ground-offsets`` option places the left and right sensor
centers 0.01 m to each side of the rig center, and their footprints on the
ground are separated by 8000 m in the East-West direction. The satellite
itself follows a North-South orbit.
The rig configuration incorporating these controls is saved to the file::
sat_sim/run-nadir-rig_config.txt
This file contains the relationship between the rig sensors in the
``ref_to_sensor_transform`` field, in addition to the intrinsics from the
input rig. More details are in :numref:`sat_sim_rig_adjust`.
A similar command is run to create forward-looking images, but with the
``--pitch`` value set to 30 degrees and the output prefix set to
``sat_sim/run-fwd``.
The produced images will have names like::
sat_sim/run-nadir-0010000.418204756-left.tif
sat_sim/run-fwd-0009939.411652856-right.tif
following the naming convention in :numref:`rig_data_conv`. The components
of these filenames are the output prefix, the timestamp, and the sensor
name. Time modeling is described in :numref:`sat_sim_time`, and all options
for this program are documented in :ref:`sat_sim_options`.
.. figure:: ../images/orbital_rig.png
:name: orbital_rig_fig
:alt: Orbital rig example
A sample left and right image as produced by the rig (after mapprojection).
The images have notable overlap. These show some fields and mountain
foothills in California's Central Valley.
Interest point matches
~~~~~~~~~~~~~~~~~~~~~~
The ``rig_calibrator`` program expects the camera poses and the interest point
matches between images to be stored in an NVM file (a format commonly employed in
Structure-from-Motion applications). See :numref:`ba_nvm`.
Since there are 12 input images, and each must be matched against every other
one, the :numref:`parallel_bundle_adjust` program is called to ensure
parallelization::
parallel_bundle_adjust \
--ip-per-image 10000 \
--output-cnet-type nvm \
sat_sim/*{left,right}.tif \
sat_sim/*{left,right}.tsai \
--camera-weight 1.0 \
--tri-weight 1.0 \
--num-iterations 100 \
-o ba/run
Rig calibration
~~~~~~~~~~~~~~~
The :ref:`rig_calibrator` program is then run::
rig_calibrator \
--rig-config sat_sim/run-nadir-rig_config.txt \
--use-initial-rig-transforms \
--fix-rig-translations \
--nvm ba/run.nvm \
--camera-poses-to-float "left right" \
--intrinsics-to-float \
"left:focal_length right:focal_length" \
--camera-position-uncertainty 1.0 \
--heights-from-dem aster_dem.tif \
--heights-from-dem-uncertainty 2.0 \
--heights-from-dem-robust-threshold 0.1 \
--tri-weight 1.0 \
--save-pinhole-cameras \
--num-iterations 100 \
--out-dir rig
Since the input data is perfect, very few changes are expected. The produced
pinhole cameras (:numref:`pinholemodels`), saved in the output ``rig`` directory
(via the ``--save-pinhole-cameras`` option), should be very similar to the
initial inputs in the ``sat_sim`` directory.
The ``--heights-from-dem`` option demonstrates the new DEM constraint feature.
The implementation is as for bundle adjustment (:numref:`heights_from_dem`).
Here we used ``--use-initial-rig-transforms`` because we start with a known rig,
rather than having to determine it from camera poses.
The option ``--fix-rig-translations`` is quite important for orbital rigs. The
distance between rig sensors is very small compared to the distance from the
satellite to the ground. Without this constraint the rig sensors could move
notably in the sensor plane without affecting the reprojection error.
Consider using here the option ``--camera-position-uncertainty``. It is
suggested to be generous with the uncertainty value, as this constraint can
prevent convergence.
See :numref:`rig_opts` for the full list of options.
================================================
FILE: docs/examples/pbs_slurm.rst
================================================
.. _pbs_slurm:
Using PBS and SLURM
-------------------
Running ``parallel_stereo`` (:numref:`parallel_stereo`) can be very
computationally expensive, so often it is launched on high-performance
multi-machine systems. Here it will be shown how to run this program on a
*Portable Batch System* (*PBS*) setup, such as NASA Pleiades, and on a *Simple
Linux Utility for Resource Management* (*SLURM*) system.
In either of these, it is assumed that all compute nodes share disk storage
space and are able to communicate via ssh without a password. The list of nodes
must be passed as an argument to this program via ``--nodes-list``.
PBS
~~~
On a PBS system, one can have a script as follows::
#!/bin/bash
# Change to current directory
cd $PBS_O_WORKDIR
# Set the path to ASP tools
export PATH=/path/to/ASP/bin:$PATH
# Run parallel_stereo
parallel_stereo --stereo-algorithm asp_mgm \
--processes 4 --subpixel-mode 3 -t rpc \
--nodes-list $PBS_NODEFILE \
left.tif right.tif left.xml right.xml \
run/run
# Run point2dem
point2dem --auto-proj-center run/run-PC.tif
Note the two special environmental variables ``PBS_O_WORKDIR`` and ``PBS_NODEFILE``
which refer to the current work directory in which the script is started, and the
list of nodes allocated for the job.
Ensure the option ``--nodes-list`` is set, otherwise only the head node
will be used.
This script, named for example, ``run.sh``, can be launched as::
qsub -m n -r n -N jobName -l walltime=12:00:00 \
-W group_list=yourGroup -j oe -S /bin/bash \
-l select=8:ncpus=20:model=ivy -- $(pwd)/run.sh
Additional arguments can be passed in on this line to ``run.sh``,
which can be accessed from within that script as ``$1``, ``$2``, etc.,
per bash shell conventions.
It is strongly suggested to learn what each of the above options does
and adjust them for your needs.
SLURM
~~~~~
With SLURM, a script as follows can work::
#!/bin/bash
#SBATCH --job-name=asp
#SBATCH --output=asp.log
#SBATCH --nodes=4
#SBATCH --ntasks-per-node=36
#SBATCH --time=50:00:00
#SBATCH --partition=queue1
# Change to the directory in which the job was submitted
cd $SLURM_SUBMIT_DIR
# Create a temporary list of nodes in current directory
nodesList=$(mktemp -p $(pwd))
# Set up the nodes list
scontrol show hostname $SLURM_NODELIST | tr ' ' '\n' > $nodesList
# Run parallel_stereo. (Ensure that this program is in the path.)
parallel_stereo --nodes-list $nodesList \
--processes 4 \
--parallel-options '--sshdelay 0.1' \
# Delete the temporary list of nodes
/bin/rm -fv $nodesList
As before, the options and values above should be adjusted for your needs.
Ensure the option ``--nodes-list`` is set, otherwise only the head node
will be used.
If your SLURM setup requires a custom ssh port, set in the list of nodes
the full ssh command to each node, rather than the node name. Example::
ssh -p port1 node1
ssh -p port2 node2
================================================
FILE: docs/examples/perusat1.rst
================================================
.. _perusat1:
PeruSat-1
---------
PeruSat-1 (launched 2016) is a Peruvian Earth observation satellite with
0.7-meter panchromatic resolution. It provides exact linescan camera models and
RPC-approximated camera models in separate files. The names for these start with
"DIM" and "RPC", respectively, and end with ".XML".
ASP expects raw (non-orthorectified) images. The USGS CSM library (:numref:`csm`)
is used for the linescan model.
The session type is ``-t perusat`` (:numref:`ps_options`). If the ``-t``
option is not specified, it will be auto-detected from the camera files.
For the RPC model (:numref:`rpc`), the option ``-t rpc`` should be used
and the RPC camera files should be passed in.
.. _perusat1_stereo:
Bundle adjustment and stereo with raw images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Bundle adjustment (:numref:`bundle_adjust`) is suggested before stereo::
bundle_adjust -t perusat \
--camera-weight 0 \
--tri-weight 0.1 \
left.tif right.tif \
left_exact.xml right_exact.xml \
-o ba/run
With the exact models, the stereo command, with bundle-adjusted cameras, is::
parallel_stereo -t perusat \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
--bundle-adjust-prefix ba/run \
left.tif right.tif \
left_exact.xml right_exact.xml \
results/run
Then, a DEM is created with ``point2dem`` (:numref:`point2dem`)::
point2dem results/run-PC.tif
For steep terrain, it is suggested to run stereo with mapprojected images
(:numref:`perusat1_map`).
See :numref:`nextsteps` for a discussion about various speed-vs-quality choices
for stereo.
See :numref:`jitter_pleiades` for an example of solving for jitter with these
cameras. Note the limitations of the jitter solver in
:numref:`jitter_limitations`. This is available as of build 2026/03
(:numref:`release`).
.. _perusat1_map:
Stereo with mapprojected images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ASP supports running stereo with mapprojected PeruSat-1 images
(:numref:`mapproj-example`). As of build 2026/03 (:numref:`release`),
mapprojection is significantly faster due to the switch to the CSM camera model.
All input images must be mapprojected at the same resolution (which is
comparable with the ground sample distance, GSD). The same camera models must be
used for mapprojection as for stereo, so one should not mix the exact and RPC
cameras.
It is strongly suggested to verify that the input DEM used for mapprojection is
relative to the ellipsoid (:numref:`conv_to_ellipsoid`).
Example::
proj="+proj=utm +zone=17 +datum=WGS84 +units=m +no_defs"
mapproject -t perusat \
--tr 0.7 \
--t_srs "$proj" \
--bundle-adjust-prefix ba/run \
ref_dem.tif \
left.tif \
left_exact.xml \
left_map.tif
mapproject -t perusat \
--tr 0.7 \
--t_srs "$proj" \
--bundle-adjust-prefix ba/run \
ref_dem.tif \
right.tif \
right_exact.xml \
right_map.tif
parallel_stereo -t perusat \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
--bundle-adjust-prefix ba/run \
left_map.tif right_map.tif \
left_exact.xml right_exact.xml \
run_map/run \
ref_dem.tif
point2dem run_map/run-PC.tif
The projection string above needs to be modified for your area of interest. It
is strongly suggested to use an auto-determined UTM or polar stereographic
projection (:numref:`point2dem_proj`).
The value of the ``--tr`` option is the ground sample distance. It is normally
0.7 meters for PeruSat-1 PAN images. The XML files should have the GSD value.
To not use bundle-adjusted cameras, remove the option ``--bundle-adjust-prefix``
from all ``mapproject`` and ``parallel_stereo`` commands above.
It is strongly suggested to overlay the left and right mapprojected images on
each other and on the input DEM in ``stereo_gui`` (:numref:`stereo_gui`). A
systematic shift likely indicates a vertical datum disagreement between the DEM
and the camera models.
.. _perusat1_notes:
Notes
~~~~~
For PeruSat-1 exact linescan camera models the atmospheric correction and
velocity aberration corrections (:cite:`nugent1966velocity`) are disabled, as
these decrease somewhat the agreement with the RPC models.
DEMs created with the exact and RPC models differ by a systematic
vertical shift of about 15 meters for unknown reasons, even though the
intersection error maps are very similar. Nothing in the sensor manual
or camera metadata suggests the cause of this. The ``pc_align`` tool
(:numref:`pc_align`) can be used to reduce this discrepancy. The mean absolute
difference of the (full-image extent) aligned DEMs is about 0.17
meters.
================================================
FILE: docs/examples/pleiades.rst
================================================
.. _pleiades:
Pleiades
--------
ASP supports the 1A/1B and NEO satellites from Airbus Pleiades. For NEO, see
:numref:`pleiades_neo` for additional notes. SPOT 6 and 7 use a closely related
camera model (:numref:`spot67`).
Generally, ASP expects raw images, not orthorectified images. ASP supports the
Pleiades ortho products, if the projection was done on a surface of constant
height (:numref:`pleiades_projected`). It does not support images orthorectified
with a 3D terrain model.
The Airbus Pleiades data have both an exact linescan camera model and an
approximate RPC model (:numref:`rpc`). These are stored in separate files. The
names for these start with "DIM" and "RPC", respectively, and end with ".XML".
ASP supports both kinds. The USGS CSM library (:numref:`csm`) is used for
linescan models.
See :numref:`jitter_pleiades` for an example of solving for
jitter for these cameras.
Bundle adjustment and stereo with raw images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See :numref:`airbus_tiled` if the input images arrive in multiple tiles.
Bundle adjustment (:numref:`bundle_adjust`) is suggested before stereo. It
should be run as::
bundle_adjust -t pleiades \
--camera-weight 0 \
--tri-weight 0.1 \
left.tif right.tif \
left_exact.xml right_exact.xml \
-o ba/run
With the exact models, the stereo command, with bundle-adjusted cameras, is::
parallel_stereo -t pleiades \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
--bundle-adjust-prefix ba/run \
left.tif right.tif \
left_exact.xml right_exact.xml \
results/run
Then, a DEM is created with ``point2dem`` (:numref:`point2dem`)::
point2dem results/run-PC.tif
For steep terrain, it is suggested to run stereo with mapprojected
images (:numref:`pleiades_map`).
See :numref:`nextsteps` for a discussion about various
speed-vs-quality choices for stereo.
For the RPC model the option ``-t rpc`` should be used and the correct
camera files should be passed in. If the ``-t`` option is not
specified, it will be auto-guessed based on the content of the camera
files provided as inputs.
The ``--bundle-adjust-prefix`` option above enables the use of
bundle-adjusted cameras.
For Pleiades exact linescan camera models the atmospheric correction
and velocity aberration corrections (:cite:`nugent1966velocity`) are
disabled. This ensures that the exact and RPC camera models agree (see
below).
.. _pleiades_map:
Stereo with mapprojected images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ASP supports running stereo with mapprojected Pleiades images
(:numref:`mapproj-example`).
All input images must be mapprojected at the same resolution (which is
comparable with the ground sample distance, GSD). The same camera models must be
used for mapprojection as for stereo, so one should not mix the exact and RPC
cameras.
Ensure the input DEM used for mapprojection is relative to the ellipsoid
(:numref:`conv_to_ellipsoid`).
Example::
proj="+proj=utm +zone=13 +datum=WGS84 +units=m +no_defs"
mapproject -t pleiades \
--tr 0.5 \
--t_srs "$proj" \
--bundle-adjust-prefix ba/run \
ref_dem.tif \
left.tif \
left_exact.xml \
left_map.tif
mapproject -t pleiades \
--tr 0.5 \
--t_srs "$proj" \
--bundle-adjust-prefix ba/run \
ref_dem.tif \
right.tif \
right_exact.xml \
right_map.tif
parallel_stereo -t pleiades \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
--bundle-adjust-prefix ba/run \
left_map.tif right_map.tif \
left_exact.xml right_exact.xml \
run_map/run \
ref_dem.tif
point2dem run_map/run-PC.tif
The projection string above needs to be modified for your area of
interest. It is strongly suggested to use an auto-determined UTM or polar
stereographic projection (:numref:`point2dem_proj`).
The value of the ``--tr`` option is the ground sample distance. It is normally
0.5 to 0.7 meters for Pleiades PAN images. The XML files should have the GSD
value.
To not use bundle-adjusted cameras, remove the option ``--bundle-adjust-prefix``
from all ``mapproject`` and ``parallel_stereo`` commands above.
Exact and RPC cameras
~~~~~~~~~~~~~~~~~~~~~
To compare the linescan (exact) and RPC models, run ``cam_test``
(:numref:`cam_test`) as::
cam_test --image img.tif \
--cam1 cam_exact.xml \
--cam2 cam_rpc.xml \
--session1 pleiades --session2 rpc
This should give great agreement when it comes to pixels projected
from one camera to the ground, then projected back to the other
camera::
cam1 to cam2 pixel diff
Max: 0.00304066
cam2 to cam1 pixel diff
Max: 0.00296764
The camera centers computed by the two methods won't agree, because
the RPC camera model does not store the camera center. ASP then
substitutes it with an estimated point on the ray from the camera
center to the ground. This disagreement is not an issue in practice.
Commands similar to the above can be used to compare the exact and RPC
cameras not to each other but against themselves. This tool will also
print timing information for the operation of projecting a pixel to
the ground and back.
.. _pleiades_neo:
Pleiades NEO
~~~~~~~~~~~~
Several peculiarities make the Pleiades NEO data different from 1A/1B (:numref:`pleiades`):
- The tabulated positions and orientations may start slightly after the first
image line and end slightly before the last image line. If these scenarios are
encountered, linear extrapolation based on two nearest values is used to fill
in the missing values and a warning is printed for each such operation.
- There is no field for standard deviation of the ground locations of pixels
projected from the cameras, so error propagation is not possible unless such a
value is specified manually (:numref:`error_propagation`).
- The RPC camera models for a stereo triplet can be rather inconsistent with
each other, resulting in large triangulation error. It is suggested to use
instead the exact linescan camera model.
.. _pleiades_projected:
Pleiades projected images
~~~~~~~~~~~~~~~~~~~~~~~~~
Airbus offers Pleiades ortho images, that are projected onto a surface of
constant height above a datum. A pair of such images can be used for stereo
and terrain creation.
Each ortho image comes with two XML files. The first, with the ``DIM`` prefix,
stores the projection height, in the ``Bounding_Polygon`` XML field, in the
``H`` subfield. This height is in meters, above the WGS84 ellipsoid. This file
lacks the camera model, unlike the earlier products.
The second XML file starts with the ``RPC`` prefix and contains the RPC camera
model.
Given two such images forming a stereo pair, the heights should be manually read
from the ``DIM`` files. Then, ``parallel_stereo`` should be invoked with
the RPC camera files, as discussed in :numref:`mapproj_ortho`.
ASP does not support Airbus images that are orthorectified with a 3D terrain
model, as that terrain model is not known.
.. _airbus_tiled:
Pleiades tiled images
~~~~~~~~~~~~~~~~~~~~~
With some Airbus Pleiades data, each of the left and right images
may arrive broken up into .TIF or .JP2 tiles, with names ending in
R1C1.tif, R2C1.tif, etc.
These need to be mosaicked before being used. That can be done as
follows (individually for the left and right stereo image), using
``gdalbuildvrt`` (:numref:`gdal_tools`)::
gdalbuildvrt vrt.tif *R*C*.tif
This expects any input .tif file to have an associated .tfw (.TFW) file
containing information about how the tiles should be combined.
If both PAN and multispectral tiles are present, use only the PAN ones.
This will create a virtual mosaic, which is just a plain text file having
pointers to the subimages. ASP can use that one as if it were a real image. If
desired, an actual self-contained image can be produced with::
gdal_translate -co TILED=YES -co BLOCKXSIZE=256 -co BLOCKYSIZE=256 \
-co BIGTIFF=IF_SAFER vrt.tif image.tif
Note that the size of this image will be comparable to the sum of sizes
of the original tiles.
The Orfeo Toolbox provides functionality for stitching such images as well.
================================================
FILE: docs/examples/rig.rst
================================================
.. _rig_calibrator_example:
A 3-sensor rig example
^^^^^^^^^^^^^^^^^^^^^^
This is an example using ``rig_calibrator`` (:numref:`rig_calibrator`)
on images acquired in a lab with cameras mounted on the `Astrobee
`_ robot. See :numref:`rig_examples`
for more examples.
An illustration is in :numref:`rig_calibrator_textures`. The dataset
for this example is available `for download
`_.
This robot has three cameras: ``nav_cam`` (wide field of view, using
the fisheye distortion model), ``sci_cam`` (narrow field of view,
using the radtan distortion model), and ``haz_cam`` (has depth
measurements, with one depth xyz value per pixel, narrow field of
view, using the radtan distortion model).
We assume the intrinsics of each sensor are reasonably well-known (but
can be optimized later). Those are set in the rig configuration
(:numref:`rig_config`). The images are organized as in
:numref:`rig_data_conv`.
The first step is solving for the camera poses, for which we use
``theia_sfm`` (:numref:`theia_sfm`)::
theia_sfm --rig-config rig_input/rig_config.txt \
--images 'rig_input/nav_cam/*.tif
rig_input/haz_cam/*.tif
rig_input/sci_cam/*.tif' \
--out-dir rig_theia
This tool will use the Theia flags file from ``share/theia_flags.txt``
in the software distribution, which can be copied to a new name,
edited, and passed to this program via ``--theia_flags``.
For this example, it is suggested to edit that flags file and set
``--feature_density=DENSE``, as the default number of features found
by Theia may be too low for the images in this dataset.
The created cameras can be visualized as::
view_reconstruction --reconstruction rig_theia/reconstruction-0
See an illustration in :numref:`view_reconstruction`.
The solved camera poses are exported to ``rig_theia/cameras.nvm``. The images
and interest point matches can be visualized in a pairwise manner using
``stereo_gui`` (:numref:`stereo_gui_nvm`) as::
stereo_gui rig_theia/cameras.nvm
The images in the nvm file will be in random order. This will be
rectified by ``rig_calibrator``.
Next, we run ``rig_calibrator``::
float_intr="" # not floating intrinsics
rig_calibrator \
--rig-config rig_input/rig_config.txt \
--nvm rig_theia/cameras.nvm \
--camera-poses-to-float "nav_cam sci_cam haz_cam" \
--intrinsics-to-float "$float_intr" \
--depth-to-image-transforms-to-float "haz_cam" \
--float-scale \
--bracket-len 1.0 \
--bracket-single-image \
--num-iterations 100 \
--num-passes 2 \
--registration \
--hugin-file control_points.pto \
--xyz-file xyz.txt \
--export-to-voxblox \
--out-dir rig_out
The previously found camera poses are read in. They are registered to world
coordinates (this is optional). For that, the four corners of a square with
known dimensions visible in a couple of images were picked at control points in
``Hugin`` (https://hugin.sourceforge.io/) and saved to ``control_points.pto``,
and the corresponding measurements of their coordinates were saved in
``xyz.txt``. See :numref:`rig_calibrator_registration` for more details.
The ``nav_cam`` camera is chosen to be the reference sensor in the rig
configuration. Its poses are allowed to float, that is, to be
optimized (``--camera-poses-to-float``), and the rig transforms from
this one to the other ones are floated as well, when passed in via the
same option. The scale of depth clouds is floated as well
(``--float-scale``).
Here we chose to optimize the rig while keeping the intrinsics
fixed. Floating the intrinsics, especially the distortion parameters,
requires many interest point matches, especially towards image boundary,
and can make the problem less stable. If desired to float them,
one can replace ``float_intr=""`` with::
intr="focal_length,optical_center,distortion"
float_intr="nav_cam:${intr} haz_cam:${intr} sci_cam:${intr}"
which will be passed above to the option ``--intrinsics-to-float``.
In this particular case, the real-world scale (but not orientation) would
have been solved for correctly even without registration, as it would
be inferred from the depth clouds.
Since the ``nav_cam`` camera has a wide field of view, the values
in ``distorted_crop_size`` in the rig configuration are smaller than
actual image dimensions to reduce the worst effects of peripheral
distortion.
One could pass in ``--num-overlaps 10`` to get more interest point
matches than what Theia finds, but this is usually not necessary.
It is best to keep this number small, especially if the features
are poor, as it may result in many outliers among images that
do not match well.
The value of ``--bracket-len`` should be a little larger than the differences
(in seconds) between the image times for which it is desired to do pose
interpolation in time. The option ``--bracket-single-image`` is used to ensure
that just a single non-reference image is used for each time bracket
(useful if there are too many images).
The options ``--save-pinhole-cameras`` and ``--save-matches``
can be employed to save the pinhole cameras and the interest point matches
in formats understood by ``bundle_adjust`` (:numref:`bundle_adjust`) and
``stereo_gui`` (:numref:`stereo_gui_nvm`), respectively.
See :numref:`rig_opts` for the full list of options.
The obtained point clouds can be fused into a mesh using ``voxblox_mesh``
(:numref:`voxblox_mesh`), using the command::
voxblox_mesh --index rig_out/voxblox/haz_cam/index.txt \
--output_mesh rig_out/fused_mesh.ply \
--min_ray_length 0.1 --max_ray_length 4.0 \
--voxel_size 0.01
This assumes that depth sensors were present. Otherwise, one needs to
create point clouds with stereo, see :numref:`multi_stereo`.
The output mesh is ``fused_mesh.ply``, points no further than 4
meters from each camera center are used (per ``--max_ray_length``),
and the mesh is obtained after binning the points into voxels of
1 cm in size.
Full-resolution textured meshes can be obtained by projecting and
fusing the images for each sensor with ``texrecon``
(:numref:`texrecon`)::
for cam in nav_cam sci_cam; do
texrecon --rig_config rig_out/rig_config.txt \
--camera_poses rig_out/cameras.txt \
--mesh rig_out/fused_mesh.ply \
--rig_sensor ${cam} \
--undistorted_crop_win '1000 800' \
--out_dir rig_out/texture
done
The obtained textured meshes can be inspected for disagreements, by
loading them in MeshLab, as::
meshlab rig_out/fused_mesh.ply \
rig_out/texture/nav_cam/texture.obj \
rig_out/texture/sci_cam/texture.obj
See an illustration in :numref:`rig_calibrator_textures`. See a larger
example in :numref:`sfm_iss`, using two rigs.
================================================
FILE: docs/examples/rpc.rst
================================================
.. _rpc:
RPC camera models
-----------------
Some vendors, such as GeoEye with its Ikonos and two
GeoEye satellites, Airbus, with its SPOT and Pleiades satellites, the
Indian Cartosat-1 satellite, PeruSat-1, the Spanish Deimos 1 and 2,
etc., provide Rational Polynomial Coefficient (RPC) camera models.
(Certain providers also offer exact linescan models. ASP supports the
ones from DigitalGlobe/Maxar (:numref:`dg_tutorial`),
PeruSat-1 (:numref:`perusat1`), Pleiades 1A/1B (:numref:`pleiades`),
SPOT 5 (:numref:`spot5`), and SPOT 6/7 (:numref:`spot67`).)
About RPC
~~~~~~~~~
RPC represents four 20-element polynomials that map geodetic coordinates
(longitude-latitude-height above datum) to image pixels. Since they are
easy to implement and fast to evaluate, RPC represents a universal
camera model providing a simple approximation to complex exact camera
models that are unique to each vendor. The only downside is that it has
less precision in our opinion compared to the exact camera models.
Our RPC read driver is GDAL. If the command ``gdalinfo``
(:numref:`gdal_tools`) can identify the RPC information inside the
headers of your image files (whether that information is actually
embedded in the images, or stored separately in some auxiliary files
with a convention GDAL understands), ASP will likely be able to see it
as well. This means that sometimes we can get away with only providing
a left and right image, with no extra files containing camera
information. This is specifically the case for GeoEye, and
Cartosat-1.
Otherwise, the camera files must be specified separately in XML files, as done
for DigitalGlobe/Maxar images (:numref:`rawdg`) and PeruSat-1.
See :numref:`airbus_tiled` if the input Pleiades images arrive in multiple
tiles.
Examples
~~~~~~~~
Here we work with a GeoEye dataset for Hobart, Australia. As previously stated
in :numref:`dg_tutorial`, these types of images are not ideal for ASP. This is
both a forest and an urban area, which makes correlation difficult. ASP was
designed more for modeling bare rock and ice. Any results we produce in other
environments are a bonus but are not our objective.
.. figure:: ../images/examples/geoeye/GeoEye_CloseUp_triple.png
:name: geoeye-nomap-example
Example colorized height map and ortho image output, produced
with ``point2dem`` (:numref:`point2dem`) and ``mapproject``
(:numref:`mapproject`), respectively.
GeoEye's datasets have the RPC coefficients stored as part of the
images. The stereo command is then::
parallel_stereo -t rpc \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
left.tif right.tif \
results/run
See :numref:`nextsteps` for a discussion about various speed-vs-quality choices.
For terrains having steep slopes, we recommend that images be mapprojected onto
an existing DEM before running stereo. This is described in
:numref:`mapproj-example`.
Next, ``point2dem`` (:numref:`point2dem`) is run::
point2dem --auto-proj-center results/run-PC.tif
For some cameras the RPC coefficients are stored in separate files ending in
.RPB or \_RPC.TXT (or in lower-case). These will be loaded automatically and
should not be specified in the stereo command.
For Cartosat data sometimes one should overwrite the \_RPC.TXT files
that are present with the ones that end in RPC_ORG.TXT in order for
stereo to work.
If the RPC cameras are stored separately in XML files, the stereo
command is::
parallel_stereo -t rpc \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
left.tif right.tif \
left.xml right.xml \
results/run
The RPC cameras can be bundle-adjusted (:numref:`bundle_adjust`).
If the RPC coefficients are stored in the input images, ``mapproject``
copies them to the output mapprojected images. If these coefficients
are in the associated .RPB or \_RPC.TXT files, ``mapproject`` creates
such files for each mapprojected image.
See :numref:`other-mapproj` for how ``parallel_stereo`` is invoked
with mapprojected images when the cameras are stored either separately
or part of the images.
.. _rpc_and_ba:
Adjusted RPC cameras
~~~~~~~~~~~~~~~~~~~~
It is suggested to run bundle adjustment (:numref:`bundle_adjust`) before
stereo, to ensure the cameras are self-consistent. An example is in
:numref:`ba_rpc`.
Bundle adjustment produces ``.adjust`` files that have rotation and translation
adjustments to the original cameras. These can be passed to other ASP tools via
the ``--bundle-adjust-prefix`` option.
To make new RPC cameras, with the adjustments already applied to them, use the
``bundle_adjust`` option ``--save-adjusted-rpc``. These are saved in the
``bundle_adjust`` output directory, with names ending in ``.adjusted_rpc.xml``.
These cameras can be used with ASP and third-party software.
Any produced adjusted RPC model file can be loaded by GDAL when reading an image
(including with ``gdalinfo``, :numref:`gdal_tools`) if it is renamed to have the same
name as the image but with the ``.xml`` extension, and no analogously named
``.RPB`` or ``_RPC.txt`` files are present that *may take precedence*. See the
`GeoTiff documentation
`_.
Applying the adjustments refits the RPC models, and should create cameras that
agree well with the ones with the adjustments applied externally.
It is strongly suggested to use the ``cam_test`` program to see how well an
input RPC camera agrees with itself, and the same for testing with the RPC
camera produced as documented here against itself (:numref:`cam_test`). With
this program, choose the value of the option ``--height-above-datum`` to be not
too far from the height offset in the RPC model, or surely within the
acceptable height range of the RPC model, as given by the height offset and
height scale.
This refitting will not work well for Umbra SAR cameras (:numref:`umbra_sar`),
where the height scale parameter is very large and the RPC fit does not work
in the full-height box, but only in a small range around the height offset.
If ``bundle_adjust`` is invoked with 0 iterations, the input RPC and refit
RPC should also be tested for agreement, as then they in principle should be
about the same.
To export an existing RPC camera to XML format without refitting it, use
``cam_gen`` (:numref:`cam_gen_rpc`).
Creation of RPC cameras
~~~~~~~~~~~~~~~~~~~~~~~
In addition to supporting the provided RPC models, ASP provides a
tool named ``cam2rpc`` (:numref:`cam2rpc`), that can be
used to create RPC camera models from ISIS and all other cameras that
ASP understands, including for non-Earth planets (currently only the
Earth, Moon and Mars are supported).
In such situations, the planet datum must be passed to the tools reading the RPC
models, via the ``--datum`` option.
.. _rpc_tri:
Triangulation with RPC cameras
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An RPC camera model is a black-box model, rather than one using rigorous camera
geometry. It is only able to compute a pixel value in the camera given a point
on the ground.
For triangulation, it is necessary to be able to calculate a ray emanating from
a given pixel going to the ground. With RPC cameras, a somewhat indirect method
is used.
A ray is found by determining with a solver two points on the ground that
project into the same pixel, with both points within the lon-lat-height box of
the RPC model.
In the latest ASP build (:numref:`release`), these points are picked at +/-
min(50.0, ``height_scale``) meters from the height offset specified in the RPC
model. A larger range does not make a difference, except for situations when
``height_scale`` does not represent correctly the actual height range the RPC
model was fit on, such as for Umbra SAR (:numref:`umbra_sar`). In earlier ASP
versions, the points were picked at +/- 0.9 times the height scale, which worked
well enough except for SAR.
Another ray is found the same way for the second image. Then, the two rays are
intersected as usual.
Note that the RPC model does not have the concept of camera center. This is set
to a point in the lon-lat-height box that projects into the pixel (0, 0) in the
camera (at maximum height in the box). This is not a serious problem as long as
the camera centers are not used for modeling the orbit or controlling the camera
location. Bundle adjustment and terrain reconstruction are not affected.
================================================
FILE: docs/examples/sfm_iss.rst
================================================
.. _sfm_iss:
Mapping the ISS using 2 rigs with 3 cameras each
------------------------------------------------
This example will show how to use the tools shipped with ASP to create
a 360-degree textured mesh of the `Japanese Experiment Module
<https://en.wikipedia.org/wiki/Kibo_(ISS_module)>`_ (JEM, also
known as Kibo), on the International Space Station. See :numref:`rig_examples`
for more examples.
These tools were
created as part of the `ISAAC
`_
project.
.. _sfm_iss_figure:
Illustration
^^^^^^^^^^^^
.. figure:: ../images/sci_cam_texture.png
:name: sfm_iss_texture
:alt: Rig calibrator texture.
.. figure:: ../images/sci_cam_large_texture.png
:name: sfm_large_iss_texture
:alt: Rig calibrator texture.
.. figure:: ../images/sci_cam_photo.png
:name: sfm_iss_photo
:alt: Rig calibrator photo.
A mesh created with the ``haz_cam`` depth + image sensor
and textured with ``sci_cam`` (top). A zoomed-out version showing
more of the module (middle). A ``sci_cam`` image that
was used to texture the mesh (bottom). The JEM module has many cables
and cargo, and the bot acquired the data spinning in place. This
resulted in some noise and holes in the mesh. The ``sci_cam`` texture was,
however, quite seamless, with very good agreement with the ``nav_cam``
texture (not shown), which shows that the registration was done correctly.
Overview
^^^^^^^^
Two `Astrobee `_ robots, named
*bumble* and *queen*, scanned the JEM, with each doing a portion. Both
robots have a wide-field of view navigation camera (``nav_cam``), a
color camera (``sci_cam``), and a low-resolution depth+intensity camera
(``haz_cam``).
To record the data, the robots took several stops along the center
module axis, and at each stop acquired data from a multitude of
perspectives, while rotating in place.
The data was combined into a sparse SfM map using ``theia_sfm``
(:numref:`theia_sfm`). The camera poses were refined with
``rig_calibrator`` (:numref:`rig_calibrator`). That tool models the
fact that each set of sensors is on a rig (contained within a rigid
robot frame). Then, the depth clouds were fused into a mesh
with ``voxblox_mesh`` (:numref:`voxblox_mesh`), and textured with
``texrecon`` (:numref:`texrecon`).
.. _sfm_iss_acquisition:
Data acquisition strategy
^^^^^^^^^^^^^^^^^^^^^^^^^
If designing a mapping approach, it is best to:
- Have the cameras face the surface being imaged while moving parallel to it,
in a panning motion.
- Ensure consecutively acquired images have about 75% - 90% overlap. This
and the earlier condition result in a solid perspective difference
but enough similarity so that the images are registered successfully.
- If more than one robot (rig) is used, there should be a
decently-sized surface portion seen by more than one rig, to be able to
tie them reliably together.
Challenges
^^^^^^^^^^
This example required care to address the in-place rotations, which
resulted in very little perspective change between nearby images
(hence in uncertain triangulated points), a wide range of resolutions
and distances, and occlusions (which resulted in holes). Another
difficulty was the low resolution and unique imaging modality of
``haz_cam``, which made it tricky to find interest point matches
(tie points) to other sensor data.
.. _sfm_iss_processing:
Data processing strategy
^^^^^^^^^^^^^^^^^^^^^^^^
All sensors acquire the data at independent times. The color
``sci_cam`` sensor takes one picture every few seconds, the
``nav_cam`` takes about 2-3 pictures per second, and ``haz_cam`` takes
about 10 pictures per second.
The ``nav_cam`` sensor was chosen to be the *reference* sensor. A set
of images made by this sensor with both robots was selected, ideally
as in :numref:`sfm_iss_acquisition`, and a Structure-from-Motion
*sparse map* was built.
Since the ``haz_cam`` sensor acquires images and depth data frequently
(0.1 seconds), for each ``nav_cam`` image the ``haz_cam`` frame
closest in time was selected and its acquisition timestamp was
declared to be the same as for the ``nav_cam``. Even if this
approximation may result in the geometry moving somewhat, it is likely
to not be noticeable in the final textured mesh.
The same approximation is likely to be insufficient for ``sci_cam``
when tying it to ``nav_cam``, as the time gap is now larger, and it
can result in at least a few pixels of movement whose outcome
will be a very noticeable registration error.
The approach that ``rig_calibrator`` uses is to bracket each
``sci_cam`` image by ``nav_cam`` images, as seen in
:numref:`rig_calibrator_example`, followed by pose interpolation in
time. This however doubles the number of ``nav_cam`` images and the
amount of time for the various iterations that may be needed to refine
the processing. To avoid that, we use the following approach.
We assume that a reasonably accurate rig configuration file for the
2-rig 6-sensor setup already exists, but it may not be fully precise.
It is shown in :numref:`sfm_iss_sample_rig_config`. It can be found
as described in the previous paragraph, on a small subset of the data.
Then, given the SfM sparse map created with ``nav_cam`` only, the
``haz_cam`` images (declared to be acquired at the same time as
``nav_cam``) were inserted into this map via the rig constraint. The
joint map was registered and optimized, while refining the rig
configuration (the transforms between rig sensors). A mesh was created
and textured, for each sensor. Any issues with mesh quality and
registration challenges can be dealt with at this time.
Then, the ``sci_cam`` images were also inserted via the rig
constraint, but not using ``nav_cam`` for bracketing, so the
placement was approximate. Lastly, the combined map was optimized,
while keeping the ``nav_cam`` and ``haz_cam`` poses fixed and refining
the ``sci_cam`` poses without the rig constraint or using the
timestamp information, which allows the ``sci_cam`` poses to move
freely to conform to the other already registered images.
This approach also helps with the fact that the ``sci_cam`` timestamp
can be somewhat unreliable, given that those images are acquired with
a different processor on the robot, so freeing these images from
the rig and time acquisition constraints helps with accuracy.
How all this is done will be shown in detail below.
Installing the software
^^^^^^^^^^^^^^^^^^^^^^^
See :numref:`installation`. The ``bin`` directory of the ASP software
should be added to the ``PATH`` environmental variable. Note that ASP
ships its own version of Python. That can cause conflicts if ROS
and ASP are run in the same terminal.
.. _sfm_isis_data_prep:
Data preparation
^^^^^^^^^^^^^^^^
The Astrobee data is stored in ROS bags (with an exception for
``sci_cam``), with multiple bags for each robot.
sci_cam
~~~~~~~
The ``sci_cam`` data is not stored in bags, but as individual images,
for performance reasons, as the images are too big to publish over ROS.
Their size is 5344 x 4008 pixels. It is suggested to resample them
using the GDAL tool suite shipped with ASP (:numref:`gdal_tools`) as::
gdal_translate -r average -outsize 25% 25% -of jpeg \
input.jpg output.jpg
The obtained images should be distributed in directories
corresponding to the robot, with names like ``my_data/bumble_sci``
and ``my_data/queen_sci`` (:numref:`rig_data_conv`).
nav_cam
~~~~~~~
For each ``sci_cam`` image, the ``nav_cam`` image closest in time will
be extracted, using the ``extract_bag`` tool
(:numref:`ros_tools_extract`). This is illustrated for one of the
robots::
ls my_data/bumble_sci/*.jpg > bumble_sci.txt
/usr/bin/python /path/to/ASP/libexec/extract_bag \
--bag mybag.bag \
--timestamp_list bumble_sci.txt \
--topics "/my/nav_cam/topic" \
--dirs "my_data/bumble_nav" \
--timestamp_tol 1.0
Here we used the fact that the ``sci_cam`` images have the acquisition
timestamp as part of their name (:numref:`rig_data_conv`).
This will create the directory ``my_data/bumble_nav``. The produced
files will contain, as for the other cameras, the timestamp as part of
their name, which will be different but close in time to the
``sci_cam`` timestamp.
The same command should be repeated for the other robot.
Examine the created images with the ``eog`` program on the local
system. Hopefully the images will have enough overlap. If not, images
with intermediate timestamps need to be extracted, with the help
of the timestamp-listing tool (:numref:`ros_tools_list`).
If, on the other hand, the resulting images are very similar,
some may be deleted (also with ``eog``). It is suggested to follow
the principles outlined in :numref:`sfm_iss_acquisition`.
haz_cam
~~~~~~~
As mentioned in :numref:`sfm_iss_processing`, while the ``nav_cam``
and ``sci_cam`` timestamps are kept precise, it makes the problem
much simpler to find the closest ``haz_cam`` images to the chosen
``nav_cam`` images, and to change their timestamps to match ``nav_cam``.
For that, the data should be extracted as follows::
ls my_data/bumble_nav/*.jpg > bumble_nav.txt
/usr/bin/python /path/to/ASP/libexec/extract_bag \
--bag mybag.bag \
--timestamp_list bumble_nav.txt \
--topics "/my/haz_intensity/topic /my/haz_depth/topic" \
--dirs "my_data/bumble_haz my_data/bumble_haz" \
--timestamp_tol 0.2 \
--approx_timestamp
Notice several important differences with the earlier command. We use
the ``nav_cam`` timestamps for querying. The tolerance for how close
in time produced ``haz_cam`` timestamps are to input ``nav_cam``
images is much smaller, and we use the option ``--approx_timestamp``
to change the timestamp values (and hence the names of the produced
files) to conform to ``nav_cam`` timestamps.
This tool is called with two topics, to extract the intensity (image) and
depth (point cloud) datasets, with the outputs going to the same directory
(specified twice, for each topic). The format of the depth clouds
is described in :numref:`point_cloud_format`.
An analogous invocation should happen for the other rig, with the
outputs going to subdirectories for those sensors.
A first small run
^^^^^^^^^^^^^^^^^
The strategy in :numref:`sfm_iss_processing` will be followed.
Consider a region that is seen in all ``nav_cam`` and ``haz_cam``
images (4 sensors in total). We will take advantage of the fact that
each rig configuration is reasonably well-known, so we will create a
map with only the ``nav_cam`` data for both robots, and the other
sensors will be added later. If no initial rig configuration exists,
see :numref:`rig_calibrator_example`.
The initial map
~~~~~~~~~~~~~~~
Create a text file having a few dozen ``nav_cam`` images from both
rigs in the desired region named ``small_nav_list.txt``, with one
image per line. Inspect the images in ``eog``. Ensure that each image
has a decent overlap (75%-90%) with some of the other ones, and they
cover a connected surface portion.
Run ``theia_sfm`` (:numref:`theia_sfm`) with the initial rig
configuration (:numref:`sfm_iss_sample_rig_config`), which we will
keep in a file called ``initial_rig.txt``::
theia_sfm --rig-config initial_rig.txt \
--image-list small_nav_list.txt \
--out-dir small_theia_nav_rig
The images and interest points can be examined in ``stereo_gui``
(:numref:`stereo_gui_nvm`) as::
stereo_gui small_theia_nav_rig/cameras.nvm
Control points
~~~~~~~~~~~~~~
The obtained map should be registered to world coordinates. Looking
ahead, the full map will need registering as well, so it is good to
collect control points over the entire module, perhaps 6-12 of them
(the more, the better), with at least four of them in the small
desired area of interest that is being done now. The process is
described in :numref:`rig_calibrator_registration`. More specific
instructions can be found in the `Astrobee documentation
`_.
If precise registration is not required, one could simply pick some
visible object in the scene, roughly estimate its dimensions, and
create control points based on that. The produced 3D model will then
still be geometrically self-consistent, but the orientation and scale
may be off.
We will call the produced registration files ``jem_map.pto`` and
``jem_map.txt``. The control points for the images in the future map
that are currently not used will be ignored for the time being.
Adding haz_cam
~~~~~~~~~~~~~~
Create a list called ``small_haz_list.txt`` having the ``haz_cam`` images
with the same timestamps as the ``nav_cam`` images::
ls my_data/*_haz/*.jpg > small_haz_list.txt
Insert these in the small map, and optimize all poses together as::
float="bumble_nav bumble_haz queen_nav queen_haz"
rig_calibrator \
--registration \
--hugin-file jem_map.pto \
--xyz-file jem_map.txt \
--use-initial-rig-transforms \
--extra-list small_haz_list.txt \
--rig-config initial_rig.txt \
--nvm small_theia_nav_rig/cameras.nvm \
--out-dir small_rig \
--camera-poses-to-float "$float" \
--depth-to-image-transforms-to-float "$float" \
--float-scale \
--intrinsics-to-float "" \
--num-iterations 100 \
--export-to-voxblox \
--num-overlaps 5 \
--min-triangulation-angle 0.5
The depth files, having the same names but with the .pc extension,
will be picked up automatically.
The value of ``--min-triangulation-angle`` filters out rays with a
very small angle of convergence. That usually makes the geometry more
stable, but if the surface is far from the sensor, and there is not
enough perspective difference between images, it may eliminate too many
features. The ``--max-reprojection-error`` option may eliminate
features as well.
Consider adding the option ``--bracket-len 1.0`` that decides the length of
time, in seconds, between reference images used to bracket the other sensor. The
option ``--bracket-single-image`` will allow only one image of any non-reference
sensor to be bracketed.
It is suggested to carefully examine the text printed on screen by this
tool. See :numref:`rig_calibrator_registration` and
:numref:`rig_calibration_stats` for the explanation of some statistics
being produced and their expected values.
Then, compare the optimized configuration file
``small_rig/rig_config.txt`` with the initial guess rig
configuration. The scales of the matrices in the
``depth_to_image_transform`` fields for both sensors should remain
quite similar to each other, while different perhaps from their
initial values in the earlier file, otherwise the results later will
be incorrect. If encountering difficulties here, consider not
floating the scales at all, so omitting the ``--float-scale`` option
above. The scales will still be adjusted, but not as part of the
optimization but when the registration with control points
happens. Then they will be multiplied by the same factor.
Open the produced ``small_rig/cameras.nvm`` file in ``stereo_gui`` and
examine the features between the ``nav_cam`` and ``haz_cam``
images. Usually they are very few, but hopefully at least some are
present.
Notice that in this run we do not optimize the intrinsics, only the
camera poses and depth-to-image transforms. If desired to do so,
optimizing the focal length may provide the most payoff, followed by
the optical center. It can be tricky to optimize the distortion model,
as one needs to ensure there are many features at the periphery of
images where the distortion is strongest.
It is better to avoid optimizing the intrinsics unless the final
texture has subtle misregistration, which may be due to intrinsics. Gross
misregistration is usually due to other factors, such as insufficient
features being matched between the images. Or, perhaps, not all images
that see the same view have been matched together.
Normally some unmodeled distortion in the images is tolerable
if there are many overlapping images, as then their central areas are
used the most, and the effect of distortion on the final textured
mesh is likely minimal.
Mesh creation
~~~~~~~~~~~~~
The registered depth point clouds can be fused with ``voxblox_mesh``
(:numref:`voxblox_mesh`)::
cat small_rig/voxblox/*haz*/index.txt > \
small_rig/all_haz_index.txt
voxblox_mesh \
--index small_rig/all_haz_index.txt \
--output_mesh small_rig/fused_mesh.ply \
--min_ray_length 0.1 \
--max_ray_length 2.0 \
--median_filter '5 0.01' \
--voxel_size 0.01
The first line combines the index files for the ``bumble_haz`` and
``queen_haz`` sensors.
The produced mesh can be examined in ``meshlab``. Normally it should
be quite seamless, otherwise the images failed to be tied properly
together. There can be noise where the surface being imaged has black
objects (which the depth sensor handles poorly), cables, etc.
Some rather big holes can be created in the occluded areas.
To not use all the input images and clouds, the index file passed in
can be edited and entries removed. The names in these files are in
one-to-one correspondence with the list of ``haz_cam`` images used
earlier.
The options ``--min_ray_length`` and ``--max_ray_length`` are used to
filter out depth points that are too close or too far from the sensor.
The mesh should be post-processed with the CGAL tools
(:numref:`cgal_tools`). It is suggested to first remove most small
connected components, then do some smoothing and hole-filling, in
this order. Several iterations may be needed, and some tuning of
the parameters.
Texturing
~~~~~~~~~
Create the ``nav_cam`` texture with ``texrecon``
(:numref:`texrecon`)::
sensor="bumble_nav queen_nav"
texrecon \
--rig_config small_rig/rig_config.txt \
--camera_poses small_rig/cameras.txt \
--mesh small_rig/fused_mesh.ply \
--rig_sensor "${sensor}" \
--undistorted_crop_win '1300 1200' \
--skip_local_seam_leveling \
--out_dir small_rig
The same can be done for ``haz_cam``. Then reduce the undistorted crop
window to '250 200'. It is helpful to open these together in
``meshlab`` and see if there are seams or differences between them.
To use just a subset of the images, see the ``--subset`` option. That
is especially important if the robot spins in place, as then some of
the depth clouds have points that are far away and may be less
accurate.
When working with ``meshlab``, it is useful to save for the future
several of the "camera views", that is, the perspectives from which
the meshes were visualized, and load them next time around. That is
done from the "Window" menu, in reasonably recent ``meshlab``
versions.
Adding sci_cam
~~~~~~~~~~~~~~
If the above steps are successful, the ``sci_cam`` images for the
same region can be added in, while keeping the cameras for the sensors
already solved for fixed. This goes as follows::
ls my_data/*_sci/*.jpg > small_sci_list.txt
float="bumble_sci queen_sci"
rig_calibrator \
--use-initial-rig-transforms \
--nearest-neighbor-interp \
--no-rig \
--bracket-len 1.0 \
--extra-list small_sci_list.txt \
--rig-config small_rig/rig_config.txt \
--nvm small_rig/cameras.nvm \
--out-dir small_sci_rig \
--camera-poses-to-float "$float" \
--depth-to-image-transforms-to-float "$float" \
--intrinsics-to-float "" \
--num-iterations 100 \
--export-to-voxblox \
--num-overlaps 5 \
--min-triangulation-angle 0.5
The notable differences with the earlier invocation is that this time
only the ``sci_cam`` images are optimized (floated), the option
``--nearest-neighbor-interp`` is used, which is needed since the
``sci_cam`` images will not have the same timestamps as for the
earlier sensor, and the option ``--no-rig`` was added, which decouples
the ``sci_cam`` images from the rig, while still optimizing them with
the rest of the data, which is fixed and used as a constraint. The
option ``--bracket-len`` helps with checking how far in time newly
added images are from existing ones.
The texturing command is::
sensor="bumble_sci queen_sci"
texrecon \
--rig_config small_sci_rig/rig_config.txt \
--camera_poses small_sci_rig/cameras.txt \
--mesh small_rig/fused_mesh.ply \
--rig_sensor "${sensor}" \
--undistorted_crop_win '1300 1200' \
--skip_local_seam_leveling \
--out_dir small_sci_rig
Notice how we used the rig configuration and poses from
``small_sci_rig`` but with the earlier mesh from ``small_rig``. The
sensor names now refer to ``sci_cam`` as well.
The produced textured mesh can be overlaid on top of the earlier ones
in ``meshlab``.
Results
^^^^^^^
See :numref:`sfm_iss`.
Scaling up the problem
^^^^^^^^^^^^^^^^^^^^^^
If all goes well, one can map the whole module. Create several lists
of ``nav_cam`` images corresponding to different module portions. For
example, for the JEM, which is long in one dimension, one can
subdivide it along that axis.
Ensure that the portions have generous overlap, so many images
show up in more than one list, and that each obtained group of images
forms a connected component. That is to say, the union of surface
patches as seen from all images in a group should be a contiguous
surface.
For example, each group can have about 150-200 images, with 50-75
images being shared with each neighboring group. More images being
shared will result in a tighter coupling of the datasets and in less
registration error.
Run ``theia_sfm`` on each group of ``nav_cam`` images. A run can take
about 2 hours. While in principle this tool can be run on all images at
once, that may take longer than running it on smaller sets with
overlaps, unless one has under 500 images or so.
The obtained .nvm files can be merged with ``sfm_merge``
(:numref:`sfm_merge`) as::
sfm_merge --fast_merge --rig_config small_rig/rig_config.txt \
theia*/cameras.nvm --output_map merged.nvm
Then, given the large merged map, one can continue as earlier in the
document, with registration, adding ``haz_cam`` and ``sci_cam``
images, mesh creation, and texturing.
Fine-tuning
^^^^^^^^^^^
If the input images show many perspectives and correspond to many
distances from the surface being imaged, all this variety is good for
tying it all together, but can make texturing problematic.
It is suggested to create the fused and textured meshes (using
``voxblox_mesh`` and ``texrecon``) only with subsets of the depth
clouds and images that are closest to the surface being imaged and
face it head-on. Both of these tools can work with a subset of the
data. Manual inspection can be used to delete the low-quality inputs.
Consider experimenting with the ``--median_filter``,
``--max_ray_length``, and ``--distance_weight`` options in
``voxblox_mesh`` (:numref:`voxblox_mesh`).
Some experimentation can be done with the two ways of creating
textures given by the ``texrecon`` option ``--texture_alg``
(:numref:`texrecon`). The default method, named "center", uses the
most central area of each image, so, if there are any seams when the
camera is panning, they will be when transitioning from a surface
portion using one image to a different one. The other approach, called
"area", tries for every small surface portion to find the camera whose
direction is more aligned with the surface normal. This may give
better results when imaging a round object from many perspectives.
In either case, seams are a symptom of registration having failed.
It is likely because not all images seeing the same surface have been
tied together. Or, perhaps the intrinsics of the sensors were
inaccurate.
.. _map_surgery:
Surgery with maps
^^^^^^^^^^^^^^^^^
If a produced textured mesh is mostly good, but some local portion has
artifacts and may benefit from more images and/or depth clouds,
either acquired in between existing ones or from a new
dataset, this can be done without redoing all the work.
A small portion of the existing map can be extracted with the
``sfm_submap`` program (:numref:`sfm_submap`), having just ``nav_cam``
images. A new small map can be made with images from this map and
additional ones using ``theia_sfm``. This map can be merged into the
existing small map with ``sfm_merge --fast_merge``
(:numref:`sfm_merge`). If the first map passed to this tool is the
original small map, its coordinate system will be kept, and the new
Theia map will conform to it.
Depth clouds for the additional images can be extracted. The combined
small map can be refined with ``rig_calibrator``, and depth clouds
corresponding to the new data can be inserted, as earlier. The option
``--fixed-image-list`` can be used to keep some images (from the
original small map) fixed to not change the scale or position of the
optimized combined small map.
These operations should be quite fast if the chosen subset of data is
small.
Then, a mesh can be created and textured just for this
data. If happy with the results, this data can then be merged into the
original large map, and the combined map can be optimized as before.
.. _sfm_iss_sample_rig_config:
Sample rig configuration
^^^^^^^^^^^^^^^^^^^^^^^^
This is a rig configuration file having two rigs, with the
reference sensor for each given by ``ref_sensor_name``.
The reference documentation is in :numref:`rig_config`.
::
ref_sensor_name: bumble_nav
sensor_name: bumble_nav
focal_length: 608
optical_center: 632.53683999999998 549.08385999999996
distortion_coeffs: 0.99869300000000005
distortion_type: fov
image_size: 1280 960
distorted_crop_size: 1200 900
undistorted_image_size: 1200 1000
ref_to_sensor_transform: 1 0 0 0 1 0 0 0 1 0 0 0
depth_to_image_transform: 1 0 0 0 1 0 0 0 1 0 0 0
ref_to_sensor_timestamp_offset: 0
sensor_name: bumble_haz
focal_length: 206.19094999999999
optical_center: 112.48999000000001 81.216598000000005
distortion_coeffs: -0.25949800000000001 -0.084849339999999995 0.0032980310999999999 -0.00024045673000000001
distortion_type: radtan
image_size: 224 171
distorted_crop_size: 224 171
undistorted_image_size: 250 200
ref_to_sensor_transform: -0.99936179050661522 -0.011924032028375218 0.033672379416940734 0.013367103760211168 -0.99898730194891616 0.042961506978788616 0.033126005078727511 0.043384190726704089 0.99850912854240503 0.03447221364702744 -0.0015773141724172662 -0.051355063495492494
depth_to_image_transform: 0.97524944805399405 3.0340999964032877e-05 0.017520679036474685 -0.0005022892199844 0.97505286059445628 0.026270283519653003 -0.017513503933106297 -0.02627506746113482 0.97489556315227599 -0.012739449966153971 -0.0033893213295227856 -0.062385053248766351
ref_to_sensor_timestamp_offset: 0
sensor_name: bumble_sci
focal_length: 1023.6054
optical_center: 683.97547 511.2185
distortion_coeffs: -0.025598438 0.048258987 -0.00041380657 0.0056673533
distortion_type: radtan
image_size: 1336 1002
distorted_crop_size: 1300 1000
undistorted_image_size: 1300 1200
ref_to_sensor_transform: 0.99999136796886101 0.0041467228570910052 0.00026206356569790089 -0.0041456529387620027 0.99998356891519313 -0.0039592248413610866 -0.00027847706785526265 0.0039581042406176921 0.99999212789968661 -0.044775738667823875 0.022844481744319863 0.016947323592326858
depth_to_image_transform: 1 0 0 0 1 0 0 0 1 0 0 0
ref_to_sensor_timestamp_offset: 0.0
ref_sensor_name: queen_nav
sensor_name: queen_nav
focal_length: 604.39999999999998
optical_center: 588.79561999999999 509.73835000000003
distortion_coeffs: 1.0020100000000001
distortion_type: fov
image_size: 1280 960
distorted_crop_size: 1200 900
undistorted_image_size: 1200 1000
ref_to_sensor_transform: 1 0 0 0 1 0 0 0 1 0 0 0
depth_to_image_transform: 1 0 0 0 1 0 0 0 1 0 0 0
ref_to_sensor_timestamp_offset: 0
sensor_name: queen_haz
focal_length: 210.7242
optical_center: 124.59857 87.888262999999995
distortion_coeffs: -0.37295935000000002 -0.011153150000000001 0.0029100743 -0.013234186
distortion_type: radtan
image_size: 224 171
distorted_crop_size: 224 171
undistorted_image_size: 250 200
ref_to_sensor_transform: -0.99983878639670731 -0.0053134634698496939 -0.017151335887125228 0.0053588429200665524 -0.99998225876857605 -0.0026009518744718949 -0.017137211538534192 -0.0026924438805366263 0.9998495220415089 0.02589135325068561 0.0007771584936297031 -0.025089928702394019
depth_to_image_transform: 0.96637484988953426 -0.0010183057117133798 -0.039142369279180113 0.00078683373128646066 0.96715045575148029 -0.005734923775739747 0.039147706343916511 0.0056983779719958138 0.96635836939244701 -0.0079348421014152053 -0.0012389803763148686 -0.053366194196969058
ref_to_sensor_timestamp_offset: 0
sensor_name: queen_sci
focal_length: 1016.3726
optical_center: 689.17409 501.88817
distortion_coeffs: -0.019654579 0.024057067 -0.00060629998 0.0027509131
distortion_type: radtan
image_size: 1336 1002
distorted_crop_size: 1300 1000
undistorted_image_size: 1300 1200
ref_to_sensor_transform: 0.99999136796886101 0.0041467228570910052 0.00026206356569790089 -0.0041456529387620027 0.99998356891519313 -0.0039592248413610866 -0.00027847706785526265 0.0039581042406176921 0.99999212789968661 -0.044775738667823875 0.022844481744319863 0.016947323592326858
depth_to_image_transform: 1 0 0 0 1 0 0 0 1 0 0 0
ref_to_sensor_timestamp_offset: 0
================================================
FILE: docs/examples/sfs_ctx.rst
================================================
.. _sfs_ctx:
Shape-from-Shading with CTX images
----------------------------------
This example shows how to refine a stereo terrain model produced with CTX images
(:numref:`ctx_example`) with Shape-from-Shading (SfS, :numref:`sfs`). See
:numref:`sfs_usage` for an overview and examples for other planets.
.. _sfs_ctx_results:
Results
~~~~~~~
.. figure:: ../images/ctx_sfs_zoom_in.png
:name: ctx_sfs_zoom_in
:alt: ctx_sfs_zoom_in
:align: left
From left to right: (a) a terrain model produced with CTX images and stereo, (b)
the same terrain refined with SfS and CTX images, (c) corresponding terrain
produced with stereo with HiRISE images. SfS adds notably more detail to the CTX
DEM.
.. figure:: ../images/ctx_sfs_dem_err.png
:name: ctx_sfs_dem_err
:alt: ctx_sfs_dem_err
:align: left
A larger area (about 1100 x 1100 pixels, at 6 m/pixel). The site is inside the
Jezero Crater. The top row shows, as before, the CTX stereo terrain, CTX
SfS-refined terrain, and a HiRISE terrain for comparison. The bottom row has the
differences to the HiRISE stereo DEM of the CTX stereo DEM (left) and SfS-refined
DEM (right). The SfS-refined terrain shows somewhat improved agreement with
the HiRISE terrain. The range of colors is from -10 to 10 meters. There seems to
be some local vertical bias between the CTX and HiRISE terrains that is
unrelated to SfS refinement.
.. figure:: ../images/ctx_ortho.png
:name: ctx_ortho
:alt: ctx_ortho
:align: left
Two input CTX images with different illumination conditions. There are notable
lighting differences on the mound in the upper-right area.
Preparation
~~~~~~~~~~~
How to fetch and prepare the images is described in :numref:`ctx_example`.
The camera models are produced as in :numref:`create_csm_linescan`.
It is important to have images with very diverse illumination conditions (Sun
azimuth). The azimuth angle can be found with ``sfs --query``. More details are
in :numref:`sfs_azimuth`.
Some of these images will be used to create terrain models with stereo. Any
stereo pair should have similar illumination and a non-small convergence angle
(about 30 degrees is feasible for CTX). See :numref:`stereo_pairs`.
The full dataset for this area had 115 images. It is recommended to fetch a
lot of images first and then select a subset after inspection.
The Sun azimuth angle was between -141.5 and -72.8 degrees. This is diverse
enough, but a larger range would have been preferable. No data with the Sun
on the right was found.
The images were bundle-adjusted (:numref:`bundle_adjust`). The resulting model
state files (:numref:`csm_state`) were used at all stages of subsequent
processing.
Three DEMs were produced with ``parallel_stereo`` (:numref:`parallel_stereo`)
and ``point2dem`` (:numref:`point2dem`). It is strongly advised to use the
``asp_mgm`` algorithm (:numref:`nextsteps`) and mapprojection
(:numref:`mapproj-example`).
CTX images can have very notable jitter (:numref:`jitter_ctx`), and there may be
some unmodeled lens distortion. It is suggested to mosaic the produced DEMs
with ``dem_mosaic`` (:numref:`dem_mosaic`) and then run ``geodiff`` (:numref:`geodiff`)
to compare the mosaic with each individual DEM. The least consistent DEMs
should be discarded, and the remaining ones mosaicked together.
Any holes in the produced DEM should be filled, and a bit of blur is suggested
(:numref:`dem_mosaic_extrapolate`).
The resulting mosaic will be the input for SfS refinement.
A HiRISE (:numref:`hirise_example`) stereo DEM needs to be created as well. It
will help evaluate the results, as it is at a much higher resolution.
The HiRISE DEM can be brought into alignment with the CTX one with ``pc_align``
(:numref:`pc_align`).
Running SfS
~~~~~~~~~~~
The ids of the CTX images that were input to SfS were as follows. The Sun
azimuth and elevation are measured in degrees from the North and the horizon,
respectively.
.. list-table::
:header-rows: 1
* - image_id
- azimuth
- elevation
* - J10_048842_1986_XN_18N282W
- -141.468
- 36.741
* - D14_032794_1989_XN_18N282W
- -117.901
- 52.206
* - F05_037752_2008_XN_20N282W
- -102.218
- 33.286
* - F03_037119_2001_XN_20N283W
- -90.224
- 38.861
* - J22_053233_1984_XN_18N282W
- -72.785
- 45.893
The ``parallel_sfs`` (:numref:`parallel_sfs`) command was::
parallel_sfs \
--processes 4 \
--threads 8 \
--save-sparingly \
--tile-size 400 \
--padding 50 \
--nodes-list machines.txt \
-i ctx_dem.tif \
--image-list image_list.txt \
--camera-list camera_list.txt \
--smoothness-weight 0.01 \
--initial-dem-constraint-weight 0.0002 \
--reflectance-type 0 \
--num-haze-coeffs 1 \
--max-iterations 5 \
--float-exposure \
--float-haze \
--float-albedo \
--crop-input-images \
-o sfs/run
This produced the DEM named ``sfs/run-DEM-final.tif``. See :numref:`sfs` for the
description of these options and the output files.
It was very important to model and optimize the albedo, image exposure, and
atmospheric haze.
There was notable sensitivity on the value of
``--initial-dem-constraint-weight``. That is likely because the input CTX DEM
and the reference HiRISE DEM appear to be somewhat inconsistent. Making this
value smaller resulted in the SfS DEM being in less agreement with HiRISE.
Somewhat surprisingly, the plain Lambertian reflectance (``--reflectance-type 0``)
worked about the same as the Lunar-Lambertian model (type 1),
with the Hapke model with default parameters (type 2) doing
just a very tiny bit better.
Co-optimizing the reflectance model coefficients (option
``--float-reflectance-model``) did not make a difference.
The value of ``--smoothness-weight`` could have been increased a bit, as
some numerical noise is visible.
The results are in :numref:`sfs_ctx_results`.
Further thoughts
~~~~~~~~~~~~~~~~
The shadows were not modeled here. Given that the Sun was rather high in the sky,
with various azimuth and elevation values, their effect is likely not very
strong. Pixels in shadows can be given less weight with the ``sfs`` parameter
``--robust-threshold``. This will penalize pixels for which the disagreement
between the simulated and measured images is roughly more than this
quantity. See :numref:`sfs_earth` for an example and discussion.
The disagreement between the stereo CTX and HiRISE terrains (that is seen even
before SfS) can be made smaller by individually aligning SfS-refined small tiles
(with overlap) to HiRISE, followed by mosaicking. That can make it easier to see
where SfS still needs improvement.
================================================
FILE: docs/examples/sfs_earth.rst
================================================
.. _sfs_earth:
Shape-from-Shading for Earth
----------------------------
This example shows how to refine a terrain model for Earth using
Shape-from-Shading (SfS, :numref:`sfs`). An overview and examples for other
planets are given in :numref:`sfs_usage`.
.. figure:: ../images/earth_closeup.png
:name: earth_input_images
:alt: earth_input_images
Top: Four orthorectified input images showing the diversity of illumination. Bottom left: Hillshaded DEM produced with Agisoft Photoscan. Bottom right: Hillshaded DEM refined with SfS. It can be seen that the SfS DEM has more detail. This is a small region of the test site.
.. figure:: ../images/earth_stereo_sfs_dem.png
:name: earth_stereo_sfs_dem
:alt: earth_stereo_sfs_dem
Left: Full-site hillshaded input stereo DEM (10k x 10k pixels at 0.01 m/pixel). Right: Refined full-site SfS DEM. More detail is seen. No shadow artifacts or strong dependence on albedo are observed.
.. figure:: ../images/earth_ortho_sfs_diff.png
:name: earth_ortho_sfs_diff
:alt: earth_ortho_sfs_diff
Left: Max-lit orthoimage (this eliminates shadows). Right: SfS DEM minus the input DEM.
The range of colors is between -0.1 and 0.1 meters. We do not have rigorous validation,
but these results look plausible.
Earth-specific issues
~~~~~~~~~~~~~~~~~~~~~
We will produce a terrain model for the *Lunar Surface Proving Ground* (LSPG)
at the Mojave Air and Space Port in California (35.072321 N, 118.153957 W).
The site has dimensions of 100 x 100 meters.
This site is meant to mimic the topography and optical properties of Moon's
surface. It has very strong albedo variations that need modeling. Being on
Earth, the site has an atmosphere that scatters sunlight, that needs to be taken
into account as well.
It is likely that other rocky Earth terrains will have similar properties.
Surfaces with vegetation, fresh snow, or urban areas will be very different,
and likely for those the SfS method will not work well.
Input data
~~~~~~~~~~
The site was imaged with a UAS flying at an elevation of about 100 meters. The
images are acquired with a color frame camera, looking nadir, with dimensions of
9248 x 6944 pixels, JPEG-compressed. The ground resolution is 0.01 meters per
pixel.
The camera was carefully calibrated, with its intrinsic parameters (focal
length, optical center, lens distortion) known.
Five sets of images were recorded, at different times of day. Diverse
illumination is very important for separating the albedo from ground reflectance
and atmospheric effects.
Since SfS processes grayscale data, the red image band was used.
Registration and initial model
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SfS expects a reasonably accurate DEM as input, that will be refined. The camera
intrinsics, and their positions and orientations must be known very accurately
relative to the DEM. The ``mapproject`` program (:numref:`mapproject`) can be
invoked to verify the agreement between these data.
The usual process of producing such data with ASP is to run Structure-from-Motion
(:numref:`sfm`), including bundle adjustment (:numref:`bundle_adjust`), followed
by stereo (:numref:`tutorial`) for pairs of images with a good convergence angle
(:numref:`stereo_pairs`), terrain model creation (:numref:`point2dem`), and
merge of the terrain models (:numref:`dem_mosaic`).
If desired to refine the intrinsics, the ``bundle_adjust`` program can be run
(:numref:`heights_from_dem`), with the terrain produced so far as a constraint.
If needed, alignment to a prior terrain can be done (:numref:`pc_align`),
followed by carrying over the cameras (:numref:`ba_pc_align`).
The ``stereo_gui`` program (:numref:`stereo_gui`) can help visualize the inputs,
intermediate results, and final products.
Use of prior data
~~~~~~~~~~~~~~~~~
In this example, all this processing was done with Agisoft Photoscan, a commercial
package that automates the steps mentioned above. It produced a terrain model,
orthoimages, the camera intrinsics, and the camera positions and orientations.
Camera preparation
~~~~~~~~~~~~~~~~~~
A pinhole camera model file (:numref:`pinholemodels`) was created for each
image.
To ensure tight registration, a GCP file (:numref:`bagcp`) was made for each
image with the ``gcp_gen`` program (:numref:`gcp_gen`). The inputs were the raw
images, orthoimages, and the existing DEM. The invocation was as follows, for
each image index ``i``::
gcp_gen \
--ip-detect-method 2 \
--inlier-threshold 50 \
--ip-per-tile 1000 \
--gcp-sigma 0.1 \
--camera-image image${i}.tif \
--ortho-image ortho_image.tif \
--dem dem.tif \
--output-prefix gcp/run \
-o gcp/image${i}.gcp
A single orthoimage was provided for all images with the same illumination.
This program's page has more information for how to inspect and validate
the GCP file.
If the camera positions and orientations are not known, such a GCP
file can create the camera files from scratch (:numref:`cam_from_gcp`).
The images and cameras were then bundle-adjusted (:numref:`bundle_adjust`),
together with these GCP. The provided DEM was used as a constraint, with the
options ``--heights-from-dem dem.tif --heights-from-dem-uncertainty 10.0``. The
latter parameter's value was to give less weight to the DEM than to the
GCP (see ``--gcp-sigma`` above), as the GCP are known to be quite accurate.
The ``mapproject`` program (:numref:`mapproject`) was run to verify that the
produced cameras result in orthoimages that agree well with the input DEM and
each other.
It is strongly suggested to first run this process with a small subset of the
images, for example, one for each illumination. One should also inspect the
various ``bundle_adjust`` report files (:numref:`ba_out_files`).
Terrain model preparation
~~~~~~~~~~~~~~~~~~~~~~~~~
The input terrain was regridded to a resolution of 0.01 meters per pixel
with ``gdalwarp`` (:numref:`gdal_tools`)::
gdalwarp \
-overwrite \
-r cubicspline \
-tr 0.01 0.01 \
dem.tif dem_tr0.01.tif
It is important to use a local projection in meters, such as UTM. This program
can also resample an input DEM that has a geographic projection
(longitude-latitude) to a local projection, with the option ``-t_srs``.
The produced DEM was smoothed a bit, to reduce the numerical noise::
dem_mosaic \
--dem-blur-sigma 1 \
dem_tr0.01.tif \
-o dem_tr0.01_smooth.tif
The resulting DEM can be hillshaded and visualized in ``stereo_gui`` (:numref:`stereo_gui_hillshade`).
Illumination angles
~~~~~~~~~~~~~~~~~~~
The illumination information was specified in a file named ``sfs_sun_list.txt``,
with each line having the image name and the Sun azimuth and elevation
(altitude) in degrees, in double precision, with a space as separator. The
azimuth is measured clockwise from the North, and the elevation is measured from
the horizon.
The `SunCalc <https://www.suncalc.org/>`_ site was very useful in determining
this information, given the coordinates of the site and the image acquisition
time as stored in the EXIF data. One has to be mindful of local vs UTC time.
It was sufficient to use the same Sun azimuth and elevation for all images
acquired in quick succession.
Input images
~~~~~~~~~~~~
The number of input images can be very large, which can slow down the SfS
program. It is suggested to divide them into groups, by illumination conditions,
and ignore those outside the area of interest. ASP has logic that can help
with that (:numref:`sfs_azimuth`).
Out of all images from a given group, a subset should be selected that covers
the site fully. That can be done by mapprojecting the images onto the DEM, and
then running the ``image_subset`` program (:numref:`image_subset`)::
image_subset \
--t_projwin min_x min_y max_x max_y \
--threshold 0.01 \
--image-list image_list.txt \
-o subset.txt
The values passed in via ``--t_projwin`` have the desired region extent (it can
be found with ``gdalinfo``, :numref:`gdal_tools`), or with ``stereo_gui``. It is
optional.
For an initial run, it is simpler to manually pick an image from each group.
The raw camera images corresponding to the union of all such subsets
were put in a file named ``sfs_image_list.txt``. The corresponding camera
model files were listed in the file ``sfs_camera_list.txt``, one per line.
These must be in the same order.
Running SfS
~~~~~~~~~~~
The best SfS results were produced by first estimating the image exposures, haze,
and a low-resolution albedo for the full site, then refining all these further
per tile.
This is all done under the hood by ``parallel_sfs`` (:numref:`parallel_sfs`) in the
latest build (:numref:`release`). The command is::
parallel_sfs \
-i dem.tif \
--image-list sfs_image_list.txt \
--camera-list sfs_camera_list.txt \
--sun-angles sfs_sun_list.txt \
--processes 6 \
--threads 8 \
--tile-size 200 \
--padding 50 \
--blending-dist 10 \
--smoothness-weight 3 \
--robust-threshold 10 \
--reflectance-type 0 \
--num-haze-coeffs 1 \
--initial-dem-constraint-weight 0.001 \
--albedo-robust-threshold 0.025 \
--crop-input-images \
--save-sparingly \
--max-iterations 5 \
-o sfs/run
This program can be very sensitive to the smoothness weight. A higher value will
produce blurred results, while a lower value will result in a noisy output. One
could try various values for it that differ by a factor of 10 before refining it
further.
The ``--robust-threshold`` parameter is very important for eliminating the
effect of shadows. Its value should be a fraction of the difference in intensity
between lit and shadowed pixels. Some experimentation may be needed to find the
right value. A large value will result in visible shadow artifacts. A smaller
value may require more iterations and may blur the output more.
It is strongly suggested to first run SfS on a small clip to get an intuition
for the parameters (then can use the ``sfs`` program directly).
We used the Lambertian reflectance model (``--reflectance-type 0``). For the Moon,
usually the Lunar-Lambertian model is preferred (value 1).
The produced DEM will be named ``sfs/run-DEM-final.tif``. Other outputs are
listed in :numref:`sfs_outputs`.
The results are shown in :numref:`earth_input_images`.
================================================
FILE: docs/examples/skysat.rst
================================================
.. _skysat:
SkySat Stereo and Video data
----------------------------
SkySat is a constellation of sub-meter resolution Earth observation
satellites owned by *Planet*. There are two types of SkySat
products, *Stereo* and *Video*, with each made up of
sequences of overlapping images. Their processing is described in
:numref:`skysat_stereo` and :numref:`skysat_video`, respectively.
SkySat images are challenging to process with ASP because they come in
a very long sequence, with small footprints, and high focal length. It
requires a lot of care to determine and refine the camera positions
and orientations.
A very informative paper on processing SkySat data with ASP is
:cite:`bhushan2021automated`, and their workflow is `publicly
available `_.
.. _skysat_stereo:
Stereo data
~~~~~~~~~~~
The SkySat *Stereo* products may come with Pinhole cameras
(stored in files with the ``_pinhole.json`` suffix) and/or with RPC
cameras (embedded in the TIF images or in files with the ``_RPC.txt``
suffix).
This product may have images acquired with either two or three
perspectives, and for each of those there are three sensors with
overlapping fields of view. Each sensor creates on the order of 300
images with much overlap among them.
Individual pairs of stereo images are rather easy to process with ASP,
following the example in :numref:`rpc`. Here we focus on creating
stereo from the full sequences of images.
Due to non-trivial errors in each provided camera's position and orientation,
it was found necessary to convert the given cameras to ASP's
Pinhole format (:numref:`pinholemodels`) and then run bundle
adjustment (:numref:`bundle_adjust`), to refine the camera
poses. (Note that for RPC cameras, this conversion decouples the
camera intrinsics from their poses.) Then, pairwise stereo is run, and
the obtained DEMs are mosaicked.
A possible workflow is as follows. (Compare this with the processing
of Video data in :numref:`skysat_video`. This section is newer, and if
in doubt, use the approach here.)
Creation of input cameras
^^^^^^^^^^^^^^^^^^^^^^^^^
Pinhole cameras can be created with ``cam_gen`` (:numref:`cam_gen`).
Two approaches can be used. The first is to ingest SkySat's provided
Pinhole cameras, which have a ``_pinhole.json`` suffix.
::
pref=img/1259344359.55622339_sc00104_c2_PAN_i0000000320
cam_gen ${pref}.tif \
--input-camera ${pref}_pinhole.json \
-o ${pref}.tsai
This approach is preferred. Specify a .json extension if desired
to mix and match various sensor types (:numref:`ba_frame_linescan`).
With SkySat, it is suggested not to refine the vendor-provided cameras at this
stage, but do a straightforward conversion only. Note that this was tested only
with the L1A SkySat product.
Alternatively, if the ``pinhole.json`` files are not available,
a Pinhole camera can be derived from each of their RPC
cameras.
::
pref=img/1259344359.55622339_sc00104_c2_PAN_i0000000320
cam_gen ${pref}.tif \
--input-camera ${pref}.tif \
--focal-length 553846.153846 \
--optical-center 1280 540 \
--pixel-pitch 1.0 \
--reference-dem ref.tif \
--height-above-datum 4000 \
--refine-camera \
--frame-index frame_index.csv \
--parse-ecef \
--cam-ctr-weight 1000 \
--gcp-std 1 \
--gcp-file ${pref}.gcp \
-o ${pref}.tsai
It is very important to examine if the data is of type L1A or L1B. The
value of ``--pixel-pitch`` should be 0.8 in the L1B products, but 1.0
for L1A.
Above, we read the ECEF camera positions from the ``frame_index.csv``
file provided by Planet. These positions are more accurate than what
``cam_gen`` can get on its own based on the RPC camera.
The ``--cam-ctr-weight`` and ``--refine-camera`` options will keep
the camera position in place by penalizing any deviations with the given
weight, while refining the camera orientation.
The reference DEM ``ref.tif`` is a Copernicus 30 m DEM
(:numref:`initial_terrain`). Ensure the DEM is relative to WGS84 and
not EGM96, and convert it if necessary; see :numref:`conv_to_ellipsoid`.
The option ``--input-camera`` will make
use of existing RPC cameras to accurately find the pinhole camera
poses. The option ``--height-above-datum`` should not be necessary if
the DEM footprint covers fully the area of interest.
See :numref:`cam_gen_validation` for how to validate the created cameras.
.. _skysat_bundle_adjustment:
Bundle adjustment
^^^^^^^^^^^^^^^^^
For the next steps, it may be convenient to make symbolic links from
the image names and cameras to something shorter (once relevant
metadata that needs the original names is parsed from
``frame_index.csv``). For example, if all the images and cameras just
produced are in a directory called ``img``, one can do::
cd img
ln -s ${pref}.tif n1000.tif
for the first Nadir-looking image, and similarly for Forward and
Aft-looking images and cameras, if available, and their associated RPC
metadata files.
For bundle adjustment it may be preferable to have the lists of images
and pinhole cameras stored in files, as otherwise they may be too many
to individually pass on the command line.
::
ls img/*.tif > images.txt
ls img/*.tsai > cameras.txt
The entries in these files must be in the same order.
Then run ``parallel_bundle_adjust`` (:numref:`parallel_bundle_adjust`), rather
than ``bundle_adjust``, as there are very many pairs of images to match.
::
nodesList=machine_names.txt
parallel_bundle_adjust \
--inline-adjustments \
--num-iterations 200 \
--image-list images.txt \
--camera-list cameras.txt \
--tri-weight 0.1 \
--tri-robust-threshold 0.1 \
--rotation-weight 0 \
--camera-position-weight 0 \
--auto-overlap-params "ref.tif 15" \
--min-matches 5 \
--remove-outliers-params '75.0 3.0 20 20' \
--min-triangulation-angle 15.0 \
--max-pairwise-matches 200 \
--nodes-list $nodesList \
-o ba/run
See :numref:`ba_validation` for important sanity checks and report files to
examine after bundle adjustment. See :numref:`cam_gen_validation` for how to
validate the created cameras.
See :numref:`pbs_slurm` for more details on running ASP tools on multiple
machines.
The ``--camera-position-weight`` can be set to a large number to keep the
camera positions from moving much during bundle adjustment. This is important if
it is assumed that the camera positions are already accurate and it is desired
to only refine the camera orientations. Such a constraint can prevent bundle
adjustment from converging to a solution, so should be used with great care.
See :numref:`ba_cam_constraints`.
The ``--tri-weight`` option (:numref:`ba_ground_constraints`) prevents the
triangulated points from moving too much (a lower weight value will constrain
less). The value of ``--tri-robust-threshold`` (0.1) is intentionally set to be
less than the one used for ``--robust-threshold`` (0.5) to ensure pixel
reprojection errors are always given a higher priority than triangulation
errors. See :numref:`ba_ground_constraints`.
The ``--rotation-weight`` value was set to 0, so the camera orientations can
change with no restrictions. See :numref:`ba_cam_constraints` for a discussion
of camera constraints.
If the input cameras are reasonably accurate to start with, for example,
consistent with a known DEM to within a small handful of meters, that DEM
can be used to constrain the cameras, instead of the triangulation
constraint. So, the above options can be replaced, for example, with::
--heights-from-dem dem.tif \
--heights-from-dem-uncertainty 10.0 \
--heights-from-dem-robust-threshold 0.1 \
The DEM must be relative to the WGS84 ellipsoid, rather than to a geoid,
and the weight and threshold above should be lower if the DEM has higher
uncertainty when it comes to its heights or alignment to the cameras.
See also :numref:`heights_from_dem`.
The option ``--auto-overlap-params`` automatically determines which
image pairs overlap. We used ``--max-pairwise-matches 200`` as
otherwise too many interest point matches were found.
The option ``--mapproj-dem`` (:numref:`ba_mapproj_dem`) can be used to
preview the quality of registration of the images on the ground after
bundle adjustment.
The option ``--min-triangulation-angle 15.0`` filtered out interest
point matches with a convergence angle less than this. This is very
important for creating a reliable sparse set of triangulated points
based on interest point matches (:numref:`ba_out_files`). This one can
be used to compute the alignment transform to the reference terrain::
pc_align --max-displacement 200 \
--csv-format 1:lon,2:lat,3:height_above_datum \
--save-transformed-source-points \
ref.tif ba/run-final_residuals_pointmap.csv \
-o $dir/run
If desired, the obtained alignment transform can be applied to the
cameras as well (:numref:`ba_pc_align`).
Use ``stereo_gui`` to inspect the reprojection errors in the final
``pointmap.csv`` file (:numref:`plot_csv`). See the outcome in
:numref:`skysat_stereo_grand_mesa_pointmap`.
.. _skysat_stereo_grand_mesa_poses:
.. figure:: ../images/skysat_stereo_grand_mesa_poses.png
:name: skysat-stereo-example-poses
:alt: SkySat stereo example camera poses
The roll, pitch, and yaw of the camera orientations before and after bundle
adjustment for the Aft, Forward, and Nadir cameras (for the center sensor of
the Skysat triplet). Plotted with ``orbit_plot.py`` (:numref:`orbit_plot`). The
best linear fit of this data before bundle adjustment was subtracted to
emphasize the differences, which are very small. The cameras centers were
*tightly constrained* here with a large camera position weight. Yet, see
:numref:`skysat_stereo_grand_mesa_pointmap` for the effect on the
reprojection errors.
.. _skysat_stereo_grand_mesa_pointmap:
.. figure:: ../images/skysat_stereo_grand_mesa.png
:name: skysat-stereo-example
:alt: SkySat stereo example
The colorized bundle adjustment camera reprojection errors (pointmap.csv)
overlaid on top of the Copernicus 30 m DEM for Grand Mesa, Colorado, before
optimization (left) and after (right). Plotted with ``stereo_gui``. Maximum
shade of red is reprojection error of at least 5 pixels. The same set of
clean interest points was used in both plots. It can be seen that while
bundle adjustment changes the cameras very little, it makes a very big
difference in how consistent the cameras become.
The camera positions and orientations (the latter in NED coordinates)
are summarized in two report files, before and after optimization
(:numref:`ba_cam_pose`). It is suggested to examine if these are
plausible. It is expected that the spacecraft position and orientation
will change in a slow and smooth manner, and that these will not change
drastically during bundle adjustment.
If desired to do further experiments in bundle adjustment, the
existing interest matches can be reused via the options
``--clean-match-files-prefix`` and ``--match-files-prefix``. The
matches can be inspected with ``stereo_gui``
(:numref:`stereo_gui_pairwise_matches`).
DEM creation
^^^^^^^^^^^^
How to decide which pairs of images to choose for stereo and how to combine
the resulting DEMs taking into account the stereo convergence angle is
described in :numref:`sfm_multiview`.
.. _skysat_video:
Video data
~~~~~~~~~~
The rest of this section will be concerned with the ``Video`` product,
which is a set of images recorded together in quick sequence. This is
a very capricious dataset, so some patience will be needed to work
with it. That is due to the following factors:
- The baseline can be small, so the perspective of the left and right
image can be too similar.
- The footprint on the ground is small, on the order of 2 km.
- The terrain can be very steep.
- The known longitude-latitude corners of each image have only a few
digits of precision, which can result in poor initial estimated
cameras.
Below a recipe for how to deal with this data is described, together
with things to watch for and advice when things don't work.
See also how the Stereo product was processed
(:numref:`skysat_stereo`). That section is newer, and that product
was explored in more detail. Stereo products are better-behaved than
Video products, so it is suggested to work with Stereo data, if possible,
or at least cross-reference with that section the logic below.
The input data
~~~~~~~~~~~~~~
We will use as an illustration a mountainous terrain close to
Breckenridge, Colorado. The dataset we fetched is called
``s4_20181107T175036Z_video.zip``. We chose to work with the following
four images from it::
1225648254.44006968_sc00004_c1_PAN.tiff
1225648269.40892076_sc00004_c1_PAN.tiff
1225648284.37777185_sc00004_c1_PAN.tiff
1225648299.37995577_sc00004_c1_PAN.tiff
A sample picture from this image set is shown in :numref:`skysat-example`.
It is very important to pick images that have sufficient difference in
perspective, but which are still reasonably similar, as otherwise the
procedure outlined in this section will fail.
.. figure:: ../images/Breckenridge.jpg
:name: skysat-example
:alt: SkySat example
An image used in the SkySat example. Reproduced with permission.
.. _refdem:
Initial camera models and a reference DEM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Based on vendor's documentation, these images are
:math:`2560 \times 1080` pixels. We use the geometric center of the
image as the optical center, which turned out to be a reasonable enough
assumption (verified by allowing it to float later). Since the focal
length is given as 3.6 m and the pixel pitch is
:math:`6.5 \times 10^{-6}` m, the focal length in pixels is
.. math:: 3.6/(6.5 \times 10^{-6}) = 553846.153846.
Next, a reference DEM needs to be found. Recently we recommend getting
a Copernicus 30 m DEM (:numref:`initial_terrain`).
It is very important to note that SRTM DEMs can be relative to the WGS84
ellipsoidal vertical datum, or relative to the EGM96 geoid. In the latter case,
``dem_geoid`` (:numref:`dem_geoid`) needs to be used to first convert it to be
relative to WGS84. This may apply up to 100 meters of vertical adjustment.
See :numref:`conv_to_ellipsoid`.
It is good to be a bit generous when selecting the extent of the reference DEM.
We will rename the downloaded DEM to ``ref_dem.tif``.
Using the ``cam_gen`` tool (:numref:`cam_gen`) bundled with ASP, we
create an initial camera model and a GCP file (:numref:`bagcp`) for
the first image as follows::
cam_gen 1225648254.44006968_sc00004_c1_PAN.tiff \
--frame-index output/video/frame_index.csv \
--reference-dem ref_dem.tif \
--focal-length 553846.153846 \
--optical-center 1280 540 \
--pixel-pitch 1 --height-above-datum 4000 \
--refine-camera \
--gcp-std 1 \
--gcp-file v1.gcp \
-o v1.tsai
This tool works by reading the longitude and latitude of each image
corner on the ground from the file ``frame_index.csv``, and finding the
position and orientation of the camera that best fits this data. The
camera is written to ``v1.tsai``. A GCP file is written to ``v1.gcp``.
This will help later with bundle adjustment.
If an input camera exists, such as embedded in the image file, it is
strongly suggested to pass it to this tool using the
``--input-camera`` option, as it will improve the accuracy of produced
cameras (:numref:`skysat-rpc`).
In the above command, the optical center and focal length are as mentioned
earlier. The reference SRTM DEM is used to infer the height above datum
for each image corner based on its longitude and latitude. The height
value specified via ``--height-above-datum`` is used as a fallback
option, if for example, the DEM is incomplete, and is not strictly
necessary for this example. This tool also accepts the longitude and
latitude of the corners as an option, via ``--lon-lat-values``.
The flag ``--refine-camera`` makes ``cam_gen`` solve a least square
problem to refine the output camera. In some cases it can get the
refinement wrong, so it is suggested experimenting with and without
using this option.
For simplicity of notation, we will create a symbolic link from this
image to the shorter name ``v1.tif``, and the GCP file needs to be
edited to reflect this. The same will apply to the other files. We will
have then four images, ``v1.tif, v2.tif, v3.tif, v4.tif``, and
corresponding camera and GCP files.
A good sanity check is to visualize the computed cameras.
ASP's ``sfm_view`` tool can be used (:numref:`sfm_view`). Alternatively,
ASP's ``orbitviz`` program (:numref:`orbitviz`) can create KML files
that can then be opened in Google Earth.
We very strongly recommend inspecting the camera positions and orientations,
since this may catch inaccurate cameras which will cause problems later.
Another important check is to mapproject these images using the cameras
and overlay them in ``stereo_gui`` on top of the reference DEM. Here is
an example for the first image::
mapproject --t_srs \
'+proj=stere +lat_0=39.4702 +lon_0=253.908 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +units=m' \
ref_dem.tif v1.tif v1.tsai v1_map.tif
Notice that we used above a longitude and latitude around the area of
interest. This will need to be modified for your specific example.
Bundle adjustment
~~~~~~~~~~~~~~~~~
At this stage, the cameras should be about right, but not quite exact.
We will take care of this using bundle adjustment. We will invoke this
tool twice. In the first call we will make the cameras self-consistent.
This may move them somewhat, though the ``--tri-weight`` constraint
that is used below should help. In the second call we will try to
bring them back to the original location.
::
parallel_bundle_adjust \
v[1-4].tif v[1-4].tsai \
-t nadirpinhole \
--disable-tri-ip-filter \
--skip-rough-homography \
--force-reuse-match-files \
--ip-inlier-factor 2.0 \
--ip-uniqueness-threshold 0.8 \
--ip-per-image 20000 \
--datum WGS84 \
--inline-adjustments \
--camera-weight 0 \
--tri-weight 0.1 \
--robust-threshold 2 \
--remove-outliers-params '75 3 4 5' \
--ip-num-ransac-iterations 1000 \
--num-passes 2 \
--auto-overlap-params "ref.tif 15" \
--num-iterations 1000 \
-o ba/run
parallel_bundle_adjust \
-t nadirpinhole \
--datum WGS84 \
--force-reuse-match-files \
--inline-adjustments \
--num-passes 1 --num-iterations 0 \
--transform-cameras-using-gcp \
v[1-4].tif ba/run-v[1-4].tsai v[1-4].gcp \
-o ba/run
The ``--auto-overlap-params`` option used earlier is useful when a very large
number of images is present and a preexisting DEM of the area is available,
which need not be perfectly aligned with the cameras. It can be used
to determine each camera's footprint, and hence, which cameras overlap.
Otherwise, use the ``--overlap-limit`` option to control how many subsequent
images to match with a given image.
The output optimized cameras will be named ``ba/run-run-v[1-4].tsai``.
The reason one has the word "run" repeated is because we ran this tool
twice. The intermediate cameras from the first run were called
``ba/run-v[1-4].tsai``.
Here we use ``--ip-per-image 20000`` to create a lot of interest points.
This will help with alignment later. It is suggested that the user study
all these options and understand what they do. We also used
``--robust-threshold 2`` to force the solver to work on the bigger errors.
That is necessary since the initial cameras could be pretty inaccurate.
It is very important to examine the residual file named::
ba/run-final_residuals_pointmap.csv
(:numref:`ba_err_per_point`).
Here, the third column contains the heights of triangulated interest
points, while the fourth column contains the reprojection errors. Normally
these errors should be a fraction of a pixel, as otherwise the
solution did not converge. The last entries in this file correspond to
the GCP, and those should be looked at carefully as well. The
reprojection errors for GCP should be on the order of tens of pixels
because the longitude and latitude of each GCP are not
well-known. This can be done with :numref:`stereo_gui`, which will
also colorize the residuals (:numref:`plot_csv`).
It is also very important to examine the obtained match files in the
output directory. For that, use ``stereo_gui`` with the option
``--pairwise-matches`` (:numref:`stereo_gui_view_ip`). If there are
too few matches, particularly among very similar images, one may need
to increase the value of ``--epipolar-threshold`` (or of
``--ip-inlier-factor`` for the not-recommended pinhole session). Note
that a large value here may allow more outliers, but those should normally
be filtered out by ``bundle_adjust``.
Another thing one should keep an eye on is the height above datum of the camera
centers as printed by bundle adjustment towards the end. Any large difference in
camera heights (say more than a few km) could be a symptom of some failure.
Also look at how much the triangulated points and camera centers moved as a
result of bundle adjustment. Appropriate constraints may need to be applied
(:numref:`ba_constraints`).
Note that using the ``nadirpinhole`` session is equivalent to using ``pinhole``
and setting ``--datum``.
.. _skysat_video_stereo:
Creating terrain models
~~~~~~~~~~~~~~~~~~~~~~~
The next steps are to run ``parallel_stereo`` and create DEMs.
We will run the following command for each pair of images. Note that we
reuse the filtered match points created by bundle adjustment, with the
``--clean-match-files-prefix`` option.
::
i=1
((j=i+1))
st=stereo_v${i}${j}
rm -rfv $st
mkdir -p $st
parallel_stereo --skip-rough-homography \
-t nadirpinhole --stereo-algorithm asp_mgm \
v${i}.tif v${j}.tif \
ba/run-run-v${i}.tsai ba/run-run-v${j}.tsai \
--clean-match-files-prefix ba/run \
$st/run
point2dem --auto-proj-center \
--tr 4 --errorimage $st/run-PC.tif
(Repeat this for other values of :math:`i`.)
See :numref:`nextsteps` for a discussion about various speed-vs-quality choices.
See :numref:`point2dem_proj` about DEM projection determination.
It is important to examine the mean triangulation error
(:numref:`triangulation_error`) for each DEM::
gdalinfo -stats stereo_v12/run-IntersectionErr.tif | grep Mean
which should hopefully be no more than 0.5 meters, otherwise likely
bundle adjustment failed. One should also compare the DEMs among
themselves::
geodiff --absolute stereo_v12/run-DEM.tif stereo_v23/run-DEM.tif -o tmp
gdalinfo -stats tmp-diff.tif | grep Mean
(And so on for any other pair.) Here the mean error should be on the
order of 2 meters, or hopefully less.
Mosaicking and alignment
~~~~~~~~~~~~~~~~~~~~~~~~
If more than one image pair was used, the obtained DEMs can be
mosaicked with ``dem_mosaic`` (:numref:`dem_mosaic`)::
dem_mosaic stereo_v12/run-DEM.tif stereo_v23/run-DEM.tif \
stereo_v34/run-DEM.tif -o mosaic.tif
This DEM can be hillshaded and overlaid on top of the reference DEM.
The next step is aligning it to the reference.
::
pc_align --max-displacement 1000 --save-transformed-source-points \
--alignment-method similarity-point-to-point \
ref_dem.tif mosaic.tif -o align/run
It is important to look at the errors printed by this tool before and
after alignment, as well as details about the alignment that was
applied. The obtained aligned cloud can be made into a DEM again::
point2dem \
--auto-proj-center \
--tr 4 \
align/run-trans_source.tif
The absolute difference before and after alignment can be found as
follows::
geodiff --absolute mosaic.tif ref_dem.tif -o tmp
gdalinfo -stats tmp-diff.tif | grep Mean
::
geodiff --absolute align/run-trans_source-DEM.tif ref_dem.tif -o tmp
gdalinfo -stats tmp-diff.tif | grep Mean
In this case the mean error after alignment was about 6.5 m, which is
not too bad given that the reference DEM resolution is about 30 m/pixel.
Alignment of cameras
~~~~~~~~~~~~~~~~~~~~
The transform computed with ``pc_align`` can be used to bring the
cameras in alignment to the reference DEM. That can be done as follows::
parallel_bundle_adjust -t nadirpinhole --datum wgs84 \
--force-reuse-match-files \
--inline-adjustments \
--initial-transform align/run-transform.txt \
--apply-initial-transform-only \
v[1-4].tif ba/run-run-v[1-4].tsai -o ba/run
creating the aligned cameras ``ba/run-run-run-v[1-4].tsai``. If
``pc_align`` was called with the reference DEM being the second cloud,
one should use above the file::
align/run-inverse-transform.txt
as the initial transform.
Mapprojection
~~~~~~~~~~~~~
If the steep topography prevents good DEMs from being created, one can
mapproject the images first onto the reference DEM::
for i in 1 2 3 4; do
mapproject --tr gridSize ref_dem.tif v${i}.tif \
ba/run-run-run-v${i}.tsai v${i}_map.tif
done
It is very important to use the same resolution (option ``--tr``) for
both images when mapprojecting. That helps make the resulting images
more similar and reduces the processing time (:numref:`mapproj-res`).
Then run ``parallel_stereo`` with the mapprojected images, such as::
i=1
((j=i+1))
rm -rfv stereo_map_v${i}${j}
parallel_stereo v${i}_map.tif v${j}_map.tif \
ba/run-run-run-v${i}.tsai ba/run-run-run-v${j}.tsai \
--session-type pinhole --alignment-method none \
--cost-mode 4 --stereo-algorithm asp_mgm --corr-seed-mode 1 \
stereo_map_v${i}${j}/run ref_dem.tif
point2dem --auto-proj-center \
--tr 4 --errorimage \
stereo_map_v${i}${j}/run-PC.tif
It is important to note that here we used the cameras that were aligned
with the reference DEM. We could have as well mapprojected onto a
lower-resolution version of the mosaicked and aligned DEM with its holes
filled.
When things fail
~~~~~~~~~~~~~~~~
Processing SkySat images is difficult, for various reasons mentioned
earlier. A few suggestions were also offered along the way when things
go wrong.
Problems are usually due to cameras being initialized inaccurately by
``cam_gen`` or bundle adjustment not optimizing them well. The simplest
solution is often to just try a different pair of images from the
sequence, say from earlier or later in the flight, or a pair with less
overlap, or with more time elapsed between the two acquisitions.
Modifying various parameters may help as well.
We have experimented sufficiently with various SkySat datasets to be
sure that the intrinsics (focal length, optical center, and pixel pitch)
are usually not the issue, rather the positions and orientations of the
cameras.
Structure from motion
~~~~~~~~~~~~~~~~~~~~~
In case ``cam_gen`` does not create sufficiently good cameras, one
can attempt to use the ``camera_solve`` tool (:numref:`sfm`). This
will create hopefully good cameras but in an arbitrary coordinate
system. Then we will transfer those to the world coordinates using
GCP.
Here is an example for two cameras::
out=out_v12
ba_params="--num-passes 1 --num-iterations 0
--transform-cameras-using-gcp"
theia_overrides="--sift_num_levels=6 --lowes_ratio=0.9
--min_num_inliers_for_valid_match=10
--min_num_absolute_pose_inliers=10
--bundle_adjustment_robust_loss_function=CAUCHY
--post_rotation_filtering_degrees=180.0 --v=2
--max_sampson_error_for_verified_match=100.0
--max_reprojection_error_pixels=100.0
--triangulation_reprojection_error_pixels=100.0
--min_num_inliers_for_valid_match=10
--min_num_absolute_pose_inliers=10"
rm -rfv $out
camera_solve $out --datum WGS84 --calib-file v1.tsai \
--bundle-adjust-params "$ba_params v1.gcp v2.gcp" \
v1.tif v2.tif
The obtained cameras should be bundle-adjusted as done for the outputs
of ``cam_gen``. Note that this tool is capricious and its outputs can be
often wrong. In the future it will be replaced by something more robust.
.. _skysat-rpc:
RPC models
~~~~~~~~~~
Some SkySat datasets come with RPC camera models, typically embedded in
the images. This can be verified by running::
gdalinfo -stats output/video/frames/1225648254.44006968_sc00004_c1_PAN.tiff
We found that these models are not sufficiently robust for stereo. But
they can be used to create initial guess pinhole cameras
(:numref:`pinholemodels`) with ``cam_gen``.
We will use the RPC camera model instead of longitude and latitude of
image corners to infer the pinhole camera position and orientation.
This greatly improves the accuracy and reliability.
Here is an example::
img=output/video/frames/1225648254.44006968_sc00004_c1_PAN.tiff
cam_gen $img --reference-dem ref_dem.tif --focal-length 553846.153846 \
--optical-center 1280 540 --pixel-pitch 1 --height-above-datum 4000 \
--refine-camera --gcp-std 1 --input-camera $img \
-o v1_rpc.tsai --gcp-file v1_rpc.gcp
Note that the Breckenridge dataset does not have RPC data, but other
datasets do. If the input camera is stored separately in a camera file,
use that one with ``--input-camera``.
If an RPC model is embedded in the image, one can validate how well the new Pinhole
camera approximates the existing RPC camera with ``cam_test``
(:numref:`cam_test`), with a command like::
cam_test --image image.tif --cam1 image.tif --cam2 out_cam.tsai \
--height-above-datum 4000
Then one can proceed as earlier (particularly the GCP file can be edited
to reflect the shorter image name).
One can also regenerate the provided SkySat RPC model as::
cam2rpc -t rpc --dem-file dem.tif input.tif output.xml
Here, the reference DEM should go beyond the extent of the image. This
tool makes it possible to decide how finely to sample the DEM, and one
can simply use longitude-latitude and height ranges instead of the DEM.
We assumed in the last command that the input image implicitly stores
the RPC camera model, as is the case for SkySat.
Also, any pinhole camera models obtained using our software can be
converted to RPC models as follows::
cam2rpc --dem-file dem.tif input.tif input.tsai output.xml
Bundle adjustment using reference terrain
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
At this stage, if desired (though this is rather unnecessary), one can do
joint optimization of the cameras using dense and uniformly distributed
interest points, and using the reference DEM as a constraint. This
should make the DEMs more consistent among themselves and closer to the
reference DEM.
It is also possible to float the intrinsics, per
:numref:`floatingintrinsics`, which sometimes can improve the results
further.
For that, one should repeat the ``stereo_tri`` part of the stereo
commands from :numref:`skysat_video_stereo` with the flags
``--num-matches-from-disp-triplets 10000`` and ``--unalign-disparity``
to obtain dense interest points and unaligned disparity.
(To not generate the triangulated point cloud after
this, add the option ``--compute-point-cloud-center-only``.)
Use ``--num-matches-from-disparity 10000`` if the images are large,
as the earlier related option can be very slow then.
The match points can be examined as::
stereo_gui v1.tif v2.tif stereo_v12/run-disp-v1__v2.match
and the same for the other image pairs. Hopefully they will fill as much
of the images as possible. One should also study the unaligned
disparities, for example::
stereo_v12/run-v1__v2-unaligned-D.tif
by invoking ``disparitydebug`` on it and then visualizing the two
obtained images. Hopefully these disparities are dense and with few
holes.
The dense interest points should be copied to the new bundle adjustment
directory, such as::
mkdir -p ba_ref_terrain
cp stereo_v12/run-disp-v1__v2.match ba_ref_terrain/run-v1__v2.match
and the same for the other ones (note the convention for match files in
the new directory). The unaligned disparities can be used from where
they are.
Then bundle adjustment using the reference terrain constraint proceeds
as follows::
disp_list=$(ls stereo_v[1-4][1-4]/*-unaligned-D.tif)
bundle_adjust v[1-4].tif ba/run-run-run-v[1-4].tsai -o ba_ref_terrain/run \
--reference-terrain ref_dem.tif --disparity-list "$disp_list" \
--max-num-reference-points 10000000 --reference-terrain-weight 50 \
--parameter-tolerance 1e-12 -t nadirpinhole --max-iterations 500 \
--overlap-limit 1 --inline-adjustments --robust-threshold 2 \
--force-reuse-match-files --max-disp-error 100 --camera-weight 0
If invoking this creates new match files, it means that the dense match
files were not copied successfully to the new location. If this
optimization is slow, perhaps too many reference terrain points were
picked.
This will create, as before, the residual file named::
ba_ref_terrain/run-final_residuals_pointmap.csv
showing how consistent are the cameras among themselves, and in
addition, a file named::
ba_ref_terrain/run-final_residuals_reference_terrain.txt
which tells how well the cameras are aligned to the reference terrain.
The errors in the first file should be under 1 pixel, and in the second
one should be mostly under 2-3 pixels (both are the fourth column in
these files).
The value of ``--reference-terrain-weight`` can be increased to make the
alignment to the reference terrain a little tighter.
It is hoped that after running ``parallel_stereo`` with these refined
cameras, the obtained DEMs will differ by less than 2 m among
themselves, and by less than 4 m as compared to the reference DEM.
Floating the camera intrinsics
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If desired to float the focal length as part of the optimization, one
should pass in addition, the options::
--solve-intrinsics --intrinsics-to-float 'focal_length'
Floating the optical center can be done by adding it in as well.
It is important to note that for SkySat the intrinsics seem to be
already quite good, and floating them is not necessary and is only shown
for completeness. If one wants to float them, one should vary the focal
length while keeping the optical center fixed, and vice versa, and
compare the results. Then, with the result that shows most promise, one
should vary the other parameter. If optimizing the intrinsics too
aggressively, it is not clear if they will still deliver better results
with other images or if comparing with a different reference terrain.
Yet, if desired, one can float even the distortion parameters. For that,
the input camera files need to be converted to some camera model having
these (see :numref:`pinholemodels`), and their
values can be set to something very small. One can use the Brown-Conrady
model, for example, so each camera file must have instead of ``NULL`` at
the end the fields::
BrownConrady
xp = -1e-12
yp = -1e-12
k1 = -1e-10
k2 = -1e-14
k3 = -1e-22
p1 = -1e-12
p2 = -1e-12
phi = -1e-12
There is always a chance when solving these parameters that the obtained
solution is not optimal. Hence, one can also try using as initial
guesses different values, for example, by negating the above numbers.
One can also try to experiment with the option ``--heights-from-dem``,
and also with ``--robust-threshold`` if it appears that the large errors
are not minimized enough.
================================================
FILE: docs/examples/spot5.rst
================================================
.. _spot5:
SPOT5
-----
SPOT5 is a CNES (Space Agency of France) satellite launched on May 2002
and decommissioned in March 2015. SPOT5 contained two High Resolution
Stereoscopic (HRS) instruments with a ground resolution of 5 meters.
These two cameras were pointed forwards and backwards, allowing capture
of a stereo image pair in a single pass of the satellite.
For the newer SPOT 6 and SPOT 7 satellites, which use a different format and
camera model, see :numref:`spot67`.
ASP supports only images from the HRS sensors on SPOT5. These images
come in two parts, the data file (extension ``.bil`` or ``.tif``) and
the header file for the data file (extension ``.dim``). The data file can be
either a plain binary file with no header information or a GeoTIFF file.
The header file is a plain text XML file. When using SPOT5 images with
ASP tools, pass in the data file as the image file and the header file
as the camera model file.
All ASP tools can handle ``.bil`` images (and also ``.bip`` and ``.bsq``)
as long as a similarly named ``.dim`` file exists that can be looked
up. The lookup succeeds if, for example, the ``.dim`` and ``.bil``
files differ only by extension (lower or upper case), or, as below,
when an IMAGERY.BIL file has a corresponding METADATA file.
A sample SPOT5 image can be found at
http://www.geo-airbusds.com/en/23-sample-imagery.
Image preparation
~~~~~~~~~~~~~~~~~
SPOT5 datasets come in a directory structure where the front and back images
have the same name, without the path, and the same for the camera files. This
conflicts with the ``bundle_adjust`` assumptions.
A simple workaround is to rename the images and cameras::
mv front/SEGMT01/METADATA.BIL front/SEGMT01/METADATA_FRONT.BIL
mv back/SEGMT01/METADATA.BIL back/SEGMT01/METADATA_BACK.BIL
mv front/SEGMT01/METADATA.DIM front/SEGMT01/METADATA_FRONT.DIM
mv back/SEGMT01/METADATA.DIM back/SEGMT01/METADATA_BACK.DIM
Stereo with raw images
^^^^^^^^^^^^^^^^^^^^^^
Run bundle adjustment (:numref:`bundle_adjust`)::
bundle_adjust -t spot5 \
front/SEGMT01/IMAGERY_FRONT.BIL \
back/SEGMT01/IMAGERY_BACK.BIL \
front/SEGMT01/METADATA_FRONT.DIM \
back/SEGMT01/METADATA_BACK.DIM \
-o ba_run/out
It is not clear if SPOT5 images benefit from bundle adjustment.
Run ``parallel_stereo`` (:numref:`parallel_stereo`) with the adjusted cameras::
parallel_stereo -t spot5 \
--bundle-adjust-prefix ba_run/out \
--stereo-algorithm asp_mgm \
front/SEGMT01/IMAGERY_FRONT.BIL \
back/SEGMT01/IMAGERY_BACK.BIL \
front/SEGMT01/METADATA_FRONT.DIM \
back/SEGMT01/METADATA_BACK.DIM \
st_run/out
Here we use the ``asp_mgm`` algorithm. See :numref:`nextsteps` for a discussion
about various speed-vs-quality choices of the stereo algorithms.
This is followed by DEM creation with ``point2dem`` (:numref:`point2dem`)::
point2dem st_run/out-PC.tif
Stereo with mapprojected images
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For terrains with steep slopes, it is strongly suggested to run stereo with
mapprojected images (:numref:`mapproj-example`). For that, first use the
``add_spot_rpc`` tool to generate an RPC model approximation of the SPOT5 sensor
model.
::
add_spot_rpc front/SEGMT01/METADATA_FRONT.DIM \
-o front/SEGMT01/METADATA_FRONT.DIM
add_spot_rpc back/SEGMT01/METADATA_BACK.DIM \
-o back/SEGMT01/METADATA_BACK.DIM
This will append the RPC model to the existing file. If the output
is a separate file, only the RPC model will be saved to the new file.
Then use the ``spot5maprpc`` session type when running parallel_stereo on the
mapprojected images.
Ensure that any external DEM is adjusted, if needed, to be relative to the
ellipsoid (:numref:`conv_to_ellipsoid`).
See the note in :numref:`dem_prep` about perhaps reducing the resolution of the
DEM to mapproject onto (and perhaps blurring it) if ghosting artifacts are seen
in the produced DEM.
The mapprojection step is next (:numref:`mapproject`)::
mapproject -t rpc \
--bundle-adjust-prefix ba_run/out \
--tr gridSize \
sample_dem.tif \
front/SEGMT01/IMAGERY_FRONT.BIL \
front/SEGMT01/METADATA_FRONT.DIM \
front_map_proj.tif
mapproject -t rpc \
--bundle-adjust-prefix ba_run/out \
--ref-map front_map_proj.tif \
sample_dem.tif \
back/SEGMT01/IMAGERY_BACK.BIL \
back/SEGMT01/METADATA_BACK.DIM \
back_map_proj.tif
The grid size is the known ground sample distance (GSD) of the image, in meters.
If not set, it will be auto-guessed.
Notice how we used the option ``--ref-map`` to ensure the second mapprojected
image uses the same grid size and projection as the first one. In older versions
of ASP, one must specify for both images the same projection in meters (such as
UTM), via ``--t_srs``, and the same grid size, via ``--tr``.
Stereo::
parallel_stereo -t spot5maprpc \
--bundle-adjust-prefix ba_run/out \
--stereo-algorithm asp_mgm \
front_map_proj.tif \
back_map_proj.tif \
front/SEGMT01/METADATA_FRONT.DIM \
back/SEGMT01/METADATA_BACK.DIM \
st_run_map/out \
sample_dem.tif
DEM creation::
point2dem st_run_map/out-PC.tif
See :numref:`nextsteps` for a discussion about various speed-vs-quality choices
of the stereo algorithms.
If it is desired not to use bundle adjustment, the option ``--bundle-adjust-prefix`` need not be set.
.. figure:: ../images/examples/spot5_figure.png
:name: spot5_output
Cropped region of SPOT5 image and a portion of the associated stereo
DEM overlaid on a low resolution Bedmap2 DEM.
================================================
FILE: docs/examples/spot67.rst
================================================
.. _spot67:
SPOT 6/7
--------
SPOT 6 (launched 2012) and SPOT 7 (launched 2014) are Airbus Earth observation
satellites with 1.5-meter panchromatic resolution. They are part of the same
SPOT family as SPOT 5 (:numref:`spot5`), but use the DIMAP V2 XML format and a
linescan camera model that closely follows the Pleiades sensor
(:numref:`pleiades`).
ASP expects raw (non-orthorectified) images. The data have both an exact
linescan camera model and an approximate RPC model (:numref:`rpc`), stored in
separate XML files whose names start with "DIM" and "RPC", respectively. ASP
supports both. The USGS CSM library (:numref:`csm`) is used for linescan
models.
The session type is ``-t spot`` (:numref:`ps_options`). If the ``-t``
option is not specified, it will be auto-detected from the camera files.
.. _spot67_stereo:
Bundle adjustment and stereo with raw images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Bundle adjustment (:numref:`bundle_adjust`) is suggested before stereo::
bundle_adjust -t spot \
--camera-weight 0 \
--tri-weight 0.1 \
left.tif right.tif \
left_exact.xml right_exact.xml \
-o ba/run
With the exact models, the stereo command, with bundle-adjusted cameras, is::
parallel_stereo -t spot \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
--bundle-adjust-prefix ba/run \
left.tif right.tif \
left_exact.xml right_exact.xml \
results/run
Then, a DEM is created with ``point2dem`` (:numref:`point2dem`)::
point2dem results/run-PC.tif
For steep terrain, it is suggested to run stereo with mapprojected images
(:numref:`spot67_map`).
See :numref:`nextsteps` for a discussion about various speed-vs-quality choices
for stereo.
See :numref:`jitter_pleiades` for an example of solving for jitter with these
cameras. Note the limitations of the jitter solver in
:numref:`jitter_limitations`. This is available as of build 2026/03
(:numref:`release`).
For the RPC model (:numref:`rpc`), the option ``-t rpc`` should be used
and the RPC camera files should be passed in. If the ``-t`` option is
not specified, it will be auto-guessed based on the content of the
camera files provided as inputs.
.. _spot67_map:
Stereo with mapprojected images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ASP supports running stereo with mapprojected SPOT 6/7 images
(:numref:`mapproj-example`).
All input images must be mapprojected at the same resolution (which is
comparable with the ground sample distance, GSD). The same camera models must be
used for mapprojection as for stereo, so one should not mix the exact and RPC
cameras.
Ensure the input DEM used for mapprojection is relative to the ellipsoid
(:numref:`conv_to_ellipsoid`).
Example::
proj="+proj=utm +zone=13 +datum=WGS84 +units=m +no_defs"
mapproject -t spot \
--tr 1.5 \
--t_srs "$proj" \
--bundle-adjust-prefix ba/run \
ref_dem.tif \
left.tif \
left_exact.xml \
left_map.tif
mapproject -t spot \
--tr 1.5 \
--t_srs "$proj" \
--bundle-adjust-prefix ba/run \
ref_dem.tif \
right.tif \
right_exact.xml \
right_map.tif
parallel_stereo -t spot \
--stereo-algorithm asp_mgm \
--subpixel-mode 9 \
--bundle-adjust-prefix ba/run \
left_map.tif right_map.tif \
left_exact.xml right_exact.xml \
run_map/run \
ref_dem.tif
point2dem run_map/run-PC.tif
The projection string above needs to be modified for your area of interest. It
is strongly suggested to use an auto-determined UTM or polar stereographic
projection (:numref:`point2dem_proj`).
The value of the ``--tr`` option is the ground sample distance. It is normally
1.5 meters for SPOT 6/7 PAN images. The XML files should have the GSD value.
To not use bundle-adjusted cameras, remove the option ``--bundle-adjust-prefix``
from all ``mapproject`` and ``parallel_stereo`` commands above.
.. _spot67_exact_vs_rpc:
Exact and RPC cameras
~~~~~~~~~~~~~~~~~~~~~
To compare the linescan (exact) and RPC models, run ``cam_test``
(:numref:`cam_test`) as::
cam_test --image img.tif \
--cam1 cam_exact.xml \
--cam2 cam_rpc.xml \
--session1 spot --session2 rpc
With the ESA La Crau sample (PAN band) and the additional option
``--height-above-datum 200`` (given the validity range in the RPC model), the
pixel difference between exact and RPC models was max 0.002 pixels.
The camera centers computed by the two methods won't agree, because the RPC
camera model does not store the camera center. ASP then substitutes it with an
estimated point on the ray from the camera center to the ground. This
disagreement is not an issue in practice.
Note that SPOT 6/7 RPCs use 0-based pixel offsets, while Pleiades RPCs use
1-based offsets. ASP handles both conventions automatically.
================================================
FILE: docs/examples/stereo_pairs.rst
================================================
.. _stereo_pairs:
Guidelines for selecting stereo pairs
-------------------------------------
When choosing image pairs to process, images that are taken with
similar lighting conditions and significant surface coverage overlap
are best suited for creating terrain models
:cite:`2015LPI462703B`. The images should have sufficient difference
in perspective, hence a reasonably large baseline, or, equivalently, a
non-small convergence angle between the matching rays emanating from
the two cameras, for stereo triangulation to be accurate. Yet, if the
perspectives are very different, it will be challenging to compute the
stereo correlation between images. A convergence angle of 10 to 60
degrees is likely reasonable.
Depending on the characteristics of the mission data set and the
individual images, the degree of acceptable variation will
differ. Significant differences between image characteristics
increase the likelihood of stereo matching errors and artifacts, and
these errors will propagate through to the resulting data products.
The ``parallel_stereo`` and ``bundle_adjust`` programs compute the convergence
angle for input cameras. In stereo that happens at the preprocessing and
triangulation stages (:numref:`entrypoints`), with the result printed on the
screen and saved to the log files. In ``bundle_adjust`` this computation takes
place after the optimization of the cameras finishes, and the results are saved
to a file on disk (:numref:`ba_out_files`). To find good stereo pairs, one can
run bundle adjustment on a large set of images and pick a pair with a decent
convergence angle.
Although images do not need to be mapprojected before running the
``parallel_stereo`` program, we recommend that you do run ``cam2map`` (or
``cam2map4stereo.py``) beforehand, especially for image pairs that
contain large topographic variation (and therefore large disparity
differences across the scene, e.g., Valles Marineris). Mapprojection is
especially necessary when processing HiRISE images. This removes the
large disparity differences between HiRISE images and leaves only the
small detail for the Stereo Pipeline to compute. Remember that ISIS can
work backwards through a mapprojection when applying the camera model,
so the geometric integrity of your images will not be sacrificed if you
mapproject first.
An alternative way of mapprojection, that applies to non-ISIS images
as well, is with the ``mapproject`` tool (:numref:`mapproj-example`).
Excessively noisy images will not correlate well, so images should be
photometrically calibrated in whatever fashion suits your purposes. If
there are photometric problems with the images, those photometric
defects can be misinterpreted as topography.
Remember, in order for ``parallel_stereo`` to process stereo pairs in
ISIS cube format, the images must first have SPICE data associated with
them by running ISIS's ``spiceinit`` program.
================================================
FILE: docs/examples/umbra_sar.rst
================================================
.. _umbra_sar:
Umbra SAR
---------
Here we describe processing Synthetic Aperture Radar (SAR) images for Earth
produced by `Umbra <https://umbra.space>`_. A SAR example for
the Moon is in :numref:`csm_minirf`.
Overview
~~~~~~~~
Umbra images are acquired in `Spotlight mode
`_.
SAR image appearance can vary drastically depending on the perspective. It is
important to choose images that are acquired close in time and have similar
viewing angles. The latter is measured by the provided incidence and azimuth
angles. Another measure is the squint angle.
If the stereo convergence angle (:numref:`stereo_pairs`) is too small, the
produced terrain model may not be accurate. If it is too large, the images may
be too different, and processing may fail. For the example below, this angle is
5.8 degrees (as printed by both ``parallel_stereo`` and ``bundle_adjust``). This
angle correlates well with the larger of the discrepancy in azimuth and
incidence angles between the images. We obtained acceptable results even with
convergence angles as low as 2.5 degrees.
Umbra provides GEC images that are corrected to be relative to an ellipsoid. More
raw products are available, including SICD, that have complex-valued pixels.
GEC images come with RPC (:numref:`rpc`) camera models embedded in the images
that we employ. ASP does not support the more rigorous SAR sensor models.
.. figure:: ../images/umbra_sar.png
:name: umbra_sar_fig
From left to right: hillshaded-terrain model, mapprojected
(:numref:`mapproject`) SAR image, and triangulation error image
(:numref:`triangulation_error`). The units in the colorbar on the right are
in meters.
Fetching the data
~~~~~~~~~~~~~~~~~
We downloaded the image pair::
2024-02-01-03-28-13_UMBRA-06_GEC.tif
2024-04-03-14-53-17_UMBRA-04_GEC.tif
showing a portion of the Panama Canal. Many other `Umbra datasets
`_ are available.
To make the notation shorter, we call these ``left.tif`` and ``right.tif``.
Mapprojection
~~~~~~~~~~~~~
It is suggested to run bundle adjustment first (:numref:`bundle_adjust`), to
make the images more self-consistent and reduce the triangulation error
(:numref:`triangulation_error`). For this data, bundle adjustment worked best
after mapprojecting the images.
Mapprojection should be done at the effective ground sample distance (GSD), not
nominal GSD, which can be so fine that the images may be noisy at that level.
How to find the effective resolution may require some inspection and/or reading
vendor's documentation.
How to find a DEM for mapprojection and how to adjust it to be relative to the
ellipsoid is described in :numref:`initial_terrain` and
:numref:`conv_to_ellipsoid`. We call that DEM ``ref.tif``.
Set the projection string. The UTM zone to use depends on the location of the
images.
::
proj="+proj=utm +zone=17 +ellps=WGS84 +units=m +no_defs"
The mapprojection (:numref:`mapproject`) step is as follows::
mapproject \
--tr 0.5 \
--t_srs "$proj" \
ref.tif \
left.tif \
left_proj.tif
and the same for the right image.
In the latest ASP, the projection string can be auto-determined
(:numref:`mapproj_auto_proj`). See :numref:`mapproj_refmap` for how to transfer
the projection to the right image.
Ignore any warnings about images already being mapprojected. The raw Umbra SAR
images do have some georeference information, but we will mapproject them in
either case, as results are better that way.
Bundle adjustment
~~~~~~~~~~~~~~~~~
Bundle adjustment (:numref:`bundle_adjust`) was run, while making use
of the mapprojected images (:numref:`mapip`)::
bundle_adjust -t rpc \
left.tif right.tif \
--remove-outliers-params \
"75.0 3.0 50 50" \
--mapprojected-data \
"left_proj.tif right_proj.tif ref.tif" \
-o ba/run
The cameras are embedded in the images, so they are not specified separately.
Alternatively, one can try the SIFT feature detection method
(``--ip-detect-method 1``) rather than the default (method 0). One may also
search for more interest point matches with an option such as ``--ip-per-tile``.
SAR images can be noisy and features hard to find. More features may not always
result in more matches if they are inaccurate.
How to create new RPC cameras that incorporate the adjustments is discussed in
:numref:`rpc_and_ba`. The default solution is to create external ``.adjust``
files that are passed to ``parallel_stereo`` via ``--bundle-adjust-prefix``, as
below.
More details on the ``bundle_adjust`` options are in :numref:`ba_options`.
Stereo processing
~~~~~~~~~~~~~~~~~
Next, ``parallel_stereo`` (:numref:`parallel_stereo`) was run. As before, it is
preferred to work with mapprojected images (:numref:`mapproj-example`).
::
parallel_stereo -t rpc \
--bundle-adjust-prefix ba/run \
--stereo-algorithm asp_mgm \
--nodes-list machines.txt \
left_proj.tif right_proj.tif \
stereo/run \
ref.tif
The ``asp_mgm`` algorithm worked much better than the default ``asp_bm``
(:numref:`stereo_alg_overview`).
If this program fails to find interest points, the same suggestions as for
bundle adjustment earlier apply.
A terrain model was produced with ``point2dem`` (:numref:`point2dem`),
in a local UTM projection (:numref:`point2dem_proj`)::
point2dem \
--auto-proj-center \
--t_srs "$proj" \
--errorimage \
--tr 2.0 \
stereo/run-PC.tif
It is suggested to compare the resulting terrain with the prior reference
terrain in ``ref.tif``.
Check if the stereo convergence angle is reasonable, as mentioned earlier.
Alignment
~~~~~~~~~
The ASP-created DEM was aligned to the reference DEM with ``pc_align``
(:numref:`pc_align`)::
pc_align \
--max-displacement 300 \
--save-inv-transformed-reference-points \
stereo/run-DEM.tif ref.tif \
-o align/run
A good value for the ``--max-displacement`` option is perhaps 1.5 times the mean
elevation difference between the two input DEMs that can be found with
``geodiff`` (:numref:`geodiff`) and ``gdalinfo -stats``.
The transformed cloud can be gridded back to a DEM as::
point2dem --tr 2.0 \
--t_srs "$proj" \
align/run-trans_reference.tif
Here, the projection string in ``$proj`` can be the same as for the DEM created earlier
(the ``gdalinfo -proj4`` command invoked on that DEM can print it).
The ``geodiff`` program can take the difference of the now-aligned DEMs.
Other inspections can be done as discussed in :numref:`visualizing_results`.
.. _umbra_failure:
Handling failure
~~~~~~~~~~~~~~~~
SAR images can be very hard to process, even when they look similar enough, due
to noise and fine-level speckle.
If the suggestions from above about increasing the number of interest point matches
in bundle adjustment and stereo do not work, consider trying a different stereo pair,
with a narrower stereo convergence angle, as this may result in more similar images.
Alternatively, bundle adjustment can be skipped altogether. Then, ``parallel_stereo`` can be
tried with the option ``--corr-seed-mode 2`` (:numref:`d_sub_dem`). In the latest
build (:numref:`release`) this option can function without interest points.
Consider increasing the correlation kernel size in ``parallel_stereo``
(:numref:`corr_section`), with an option such as ``--corr-kernel 9 9``. The
default is 5. The regular block matching algorithm (``asp_bm``,
:numref:`stereo_alg_overview`) may also work better for very noisy images, as it
has a larger default kernel size.
================================================
FILE: docs/examples.rst
================================================
.. include::
.. _examples:
Stereo processing examples
==========================
This chapter showcases examples of processing data sets acquired with specific
instruments. For a general introduction, see the tutorial (:numref:`tutorial`).
Structure-from-Motion examples are in :numref:`sfm_toc` (for orbital images with no
rig), and :numref:`rig_examples` (using a rig and robot images).
.. toctree::
examples/stereo_pairs
examples/hirise
examples/ctx
examples/moc
examples/mer
examples/k10
examples/lronac
examples/change3
examples/apollo15
examples/hrsc
examples/cassini
examples/csm
examples/dawn
examples/kaguya
examples/chandrayaan2
examples/junocam
examples/isis_minirf
examples/pbs_slurm
examples/aster
examples/dg
examples/rpc
examples/perusat1
examples/pleiades
examples/spot5
examples/spot67
examples/skysat
examples/historical
examples/bathy
examples/umbra_sar
.. _sfm_toc:
SfM examples
============
This chapter illustrates how to solve for cameras using Structure-from-Motion
(SfM), how to register the cameras to the ground, followed by producing a
terrain model.
.. toctree::
sfm
.. _rig_examples:
SfM examples with a rig
=======================
These examples show how to solve for camera poses using
Structure-from-Motion (SfM) and then create textured meshes.
The images are acquired using a rig mounted on a robot on the ISS
(:numref:`rig_calibrator_example`, :numref:`sfm_iss`) and with the MSL
Curiosity rover (:numref:`rig_msl`).
Somewhat related examples, but without using a rig or the above
workflow, are in :numref:`sfm` (the images are acquired in orbit using
a satellite and a DEM is produced) and :numref:`mer-example` (a basic and
rather old two-image example for the MER rovers). See also :numref:`csm_msl`
for an example using CSM cameras for the MSL rover, without employing SfM.
.. toctree::
examples/rig
examples/sfm_iss
examples/msl
examples/orbital_rig
Shape-from-Shading
==================
.. toctree::
sfs_usage
examples/sfs_earth
examples/sfs_ctx
================================================
FILE: docs/experimental.rst
================================================
.. _experimental:
Experimental features
=====================
.. _casp_go:
The CASP-GO stereo processing system
------------------------------------
CASP-GO (Co-registered ASP using Gotcha Optimisation,
https://github.com/mssl-imaging/CASP-GO) is a set of algorithms that
are meant to augment certain parts of ASP :cite:`tao2016optimised,
tao2018massive, shin2012progressively, otto1989region`. These algorithms were developed
at the Imaging Group, Mullard Space Science Laboratory, University
College London, by Yu Tao, under the direction of Jan-Peter Muller,
with funding from the EU-FP7 project titled "iMars: Analysis of Mars
Multi-Resolution Images using Auto-Coregistration, Data Mining and
Crowd Sourcing Techniques", under contract #607379.
Under NASA proposal 19-PDART19_2-0094 we researched incorporating
these algorithms into ASP.
CASP-GO consists of three algorithms:
- Gotcha disparity refinement. Its purpose is to fix artifacts in
ASP's older ``asp_bm`` block-matching algorithm
(:numref:`stereo_algos_full`) at the disparity
filtering stage. It takes as input and overwrites the ``F.tif``
disparity (which is described in :numref:`outputfiles`).
This algorithm definitely provides some in-filling functionality over
the older ``asp_bm`` performance, but users may want to experiment
with other ASP stereo algorithms (like MGM) which may also result in high
quality disparities. This logic can be turned on with the
``--gotcha-disparity-refinement`` option of
``parallel_stereo``. See below for the parameters which control it.
- Image alignment. This component uses feature detection to find
interest point matches among orthoimages associated with given DEMs,
which is then used to compute an alignment transform among the DEMs
:cite:`sidiropoulos2018automatic`. It was incorporated into ASP and
further extended as the ``image_align`` tool (:numref:`image_align`).
- Kriging. This logic is meant to produce DEMs with
fewer holes than ASP's older method in ``point2dem`` (:numref:`point2dem`)
which used a Delaunay triangulation. It is based on a technique
called ``kriging``, which is a family of generalized linear least
square regression algorithms (:cite:`remy2002gstl`), implemented in
the ``Geostatistics Template Library`` (http://gstl.sourceforge.net/).
The CASP-GO DEM-creation algorithm functions along the same lines as ASP's
recent and default implementation in ``point2dem``. The input is a
point cloud, the output is a gridded DEM, and weighted averaging
is used to combine the 3D points to produce the DEM.
The only difference is that the recent ``point2dem`` implementation (circa 3.1.0)
computes the weights based on a Gaussian with given sigma and
neighborhood size, while CASP-GO uses weights produced by the kriging
procedure with a user-specified covariance.
CASP-GO's covariance function assigns the same covariance value to all
points, which results in the kriging procedure returning constant
weights. In effect, the resulting algorithm is a particular case of the
modern approach in ``point2dem``, when the sigma value is very large.
Thus, no separate kriging implementation was added at this time.
.. For that reason, while kriging seems to be a very interesting technique,
because CASP-GO did not implement a good covariance function, and since
it would be quite tricky to assign a nontrivial covariance to
points in a cloud, we chose to not incorporate this implementation,
as it does not add to the existing functionality.
The CASP-GO parameter file
~~~~~~~~~~~~~~~~~~~~~~~~~~
CASP-GO's behavior is controlled by a parameter file, which ASP ships
as ``share/CASP-GO_params.xml``, and which can be overridden
with the ``parallel_stereo`` option ``--casp-go-param-file``.
Only the parameters relevant for Gotcha disparity refinement are read
from this file, as we did not implement the kriging algorithm,
and the ``image_align`` tool we added has its own interface.
Here are two sets of values for these parameters, optimized for CTX and
HiRISE cameras, respectively.
CTX::
ALSC iterations: 8
Max. eigenvalue: 150
ALSC kernel: 21
Grow neighbor: 8
HiRISE::
ALSC iterations: 8
Max. eigenvalue: 80
ALSC kernel: 11
Grow neighbor: 8
================================================
FILE: docs/glossary.rst
================================================
Glossary
========
.. glossary::
ASP
Ames Stereo Pipeline
CTX
Context Camera
DEM
digital elevation model, synonym for DTM
DTM
digital terrain model, synonym for DEM
ET
ephemeris time
GCP
ground control point (:numref:`bagcp`)
HRSC
High Resolution Stereo Camera
HiRISE
High Resolution Imaging Science Experiment
IRG
Intelligent Robotics Group
ISIS
`Integrated Software for Imagers and Spectrometers <https://isis.astrogeology.usgs.gov>`_
KML
Keyhole Markup Language
LROC
Lunar Reconnaissance Orbiter Camera
LRO
Lunar Reconnaissance Orbiter
LOLA
Lunar Orbiter Laser Altimeter
MER
Mars Exploration Rover
MGS
Mars Global Surveyor
MOC
Mars Orbiter Camera
MOLA
Mars Orbiter Laser Altimeter
MRO
Mars Reconnaissance Orbiter
MPL
Mars Polar Lander
NED
National Elevation Dataset
NASA
National Aeronautics and Space Administration
PDS
Planetary Data System
PVL
Parameter Value Language
THEMIS
Thermal Emission Imaging System
ULCN
Unified Lunar Coordinate Network
USGS
United States Geological Survey
VW
Vision Workbench
================================================
FILE: docs/index.rst
================================================
.. Ames Stereo Pipeline documentation master file.
Ames Stereo Pipeline documentation (version |version|)
======================================================
.. toctree::
:maxdepth: 2
:numbered:
introduction
installation
tutorial
next_steps
tips_tricks
examples
bundle_adjustment
error_propagation
correlation
experimental
tools
stereodefault
stereo_algorithms
outputfiles
pinholemodels
bathy_water_masking
papersusingasp
news
contributing
building_asp
thirdparty
acknowledgements
glossary
zzreferences
.. only:: html
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`
================================================
FILE: docs/installation.rst
================================================
.. _installation:
.. include:: ../INSTALLGUIDE.rst
.. _vwrc:
Settings optimization
---------------------
Finally, the last thing to be done for Stereo Pipeline is to set up
Vision Workbench's render and logging settings. This step is optional,
but for best performance some thought should be applied here.
Vision Workbench is a multi-threaded image processing library used by
Stereo Pipeline. The settings by which Vision Workbench processes data
are configurable by having a ``.vwrc`` file hidden in your home directory.
Below is an example::
# This is an example VW configuration file. Save this file to
# ~/.vwrc to adjust the VW log settings, even if the program is
# already running.
# General settings
[general]
default_num_threads = 16
write_pool_size = 40
system_cache_size = 1073741824 # ~ 1 GB
# The following integers are associated with the log levels
# throughout the Vision Workbench. Use these in the log rules
# below.
#
# ErrorMessage = 0
# WarningMessage = 10
# InfoMessage = 20
# DebugMessage = 30
# VerboseDebugMessage = 40
# EveryMessage = 100
#
# You can create a new log file or adjust the settings
# for the console log:
# logfile
# - or -
# logfile console
# Once you have created a logfile (or selected the console), you
# can add log rules using the following syntax. (Note that you
# can use wildcard characters '*' to catch all log_levels for a
# given log_namespace, or vice versa.)
#
# Below are examples of using the log settings.
# Turn on various logging levels for several subsystems, with
# the output going to the console (standard output).
[logfile console]
# Turn on error and warning messages for the thread subsystem.
10 = thread
# Turn on error, warning, and info messages for the
# asp subsystem.
20 = asp
# Turn on error, warning, info, and debug messages for the
# stereo subsystem.
30 = stereo
# Turn on every single message for the cache subsystem (this will
# be extremely verbose and is not recommended).
# 100 = cache
# Turn off all progress bars to the console (not recommended).
# 0 = *.progress
# Turn on logging of error and warning messages to a file for the
# stereo subsystem. Warning: This file will be always appended
# to, so it should be deleted periodically.
# [logfile /tmp/vw_log.txt]
# 10 = stereo
There are a lot of possible options that can be implemented in the above
example. Let's cover the most important options and the concerns the
user should have when selecting a value.
Performance settings
~~~~~~~~~~~~~~~~~~~~
``default_num_threads`` (default=2)
This sets the maximum number of threads that can be used for
rendering. When stereo's ``subpixel_rfne`` is running you'll
probably notice 10 threads are running when you have
``default_num_threads`` set to 8. This is not an error, you are
seeing 8 threads being used for rendering, 1 thread for holding
``main()``'s execution, and finally 1 optional thread acting as
the interface to the file driver.
It is usually best to set this parameter equal to the number of
processors on your system. Be sure to include the number of logical
processors in your arithmetic if your system supports
hyper-threading. Adding more threads for rasterization increases the
memory demands of Stereo Pipeline. If your system is memory limited,
it might be best to lower the ``default_num_threads`` option.
``write_pool_size`` (default=21)
The ``write_pool_size`` option represents the max waiting pool size
of tiles waiting to be written to disk. Most file formats do not
allow tiles to be written arbitrarily out of order. Most however
will let rows of tiles be written out of order, while tiles
inside a row must be written in order. Because of the previous
constraint, after a tile is rasterized it might spend some time
waiting in the write pool before it can be written to disk. If
the write pool fills up, only the next tile in order can be
rasterized. That makes Stereo Pipeline perform like it is only
using a single processor.
Increasing the ``write_pool_size`` makes Stereo Pipeline more able to
use all processing cores in the system. Having this value too large
can mean excessive use of memory as it must keep more portions of the
image around in memory while they wait to be written. This number
should be larger than the number of threads, perhaps by about 20.
``system_cache_size`` (default=1073741824)
Accessing a file from the hard drive can be very slow. It is
especially bad if an application needs to make multiple passes over
an input file. To increase performance, Vision Workbench will
usually leave an input file stored in memory for quick access. This
file storage is known as the 'system cache' and its max size is
dictated by ``system_cache_size``. The default value is 1 GB.
Setting this value too high can cause your application to crash. It
is usually recommend to keep this value around 1/4 of the maximum
available memory on the system. The units of this property is in
bytes.
All tools shipped with ASP have the option ``--cache-size-mb`` to
override the value of ``system_cache_size``. Its default value is
1024 MB (1 GB).
The recommendations for these values are based on use of the block
matching algorithm in ASP. When using memory intensive algorithms
such as SGM you may wish to lower some of these values (such as the
cache size) to leave more memory available for the algorithm to use.
.. _logging:
Logging settings
~~~~~~~~~~~~~~~~
The messages displayed in the console by Stereo Pipeline are grouped
into several namespaces, and by level of verbosity. An example of
customizing Stereo Pipeline's output is given in the ``.vwrc`` file
shown above.
Several of the tools in Stereo Pipeline, including ``parallel_stereo``,
automatically append the information displayed in the console to a log
file in the current output directory. These logs contain in addition
some data about your system and settings, which may be helpful in
resolving problems with the tools (:numref:`outputfiles`).
It is also possible to specify a global log file to which all tools will
append to, as illustrated in ``.vwrc``.
================================================
FILE: docs/introduction.rst
================================================
Introduction
============
The NASA Ames Stereo Pipeline (ASP) is a suite of free and open source automated
geodesy and stereogrammetry tools designed for processing images captured from
satellites, around Earth and other planets (:numref:`examples`), robotic rovers
(:numref:`rig_msl`, :numref:`csm_msl`), aerial cameras and low-cost satellites
(:numref:`skysat`), historical images (:numref:`kh4`), with and without accurate
camera pose information.
It has functionality for 3D terrain creation from stereo (:numref:`tutorial`),
alignment of point clouds (:numref:`pc_align`), structure-from-motion
(:numref:`sfm`), shape-from-shading (:numref:`sfs_usage`), bundle adjustment
(:numref:`bundle_adjust`), solving for jitter (:numref:`jitter_solve`), rig
calibration (:numref:`rig_calibrator`), refining camera intrinsics
(:numref:`floatingintrinsics`), GCP generation (:numref:`gcp_gen`,
:numref:`dem2gcp`), and a versatile GUI shell (:numref:`stereo_gui`).
ASP produces cartographic products, including digital terrain models (DTMs) and
ortho-projected images (:numref:`builddem`), 3D models (:numref:`point2mesh`),
textured meshes (:numref:`sfm_iss`), and bundle-adjusted networks of cameras
(:numref:`control_network`).
ASP's data products are suitable for science analysis, mission planning, and
public outreach.
.. figure:: images/introduction/p19view2_400px.png
:alt: 3D model of Mars
This 3D model was generated from an image pair M01/00115 and E02/01461
(34.66N, 141.29E). The complete stereo reconstruction process takes
approximately thirty minutes on a 3.0 GHz workstation for input
images of this size (1024 |times| 8064 pixels). This model,
shown here without vertical exaggeration, is roughly 2 km wide in the
cross-track dimension.
Background
----------
The Intelligent Robotics Group (IRG) at the NASA Ames Research
Center has been developing 3D surface reconstruction and visualization
capabilities for planetary exploration for more than a decade. First
demonstrated during the Mars Pathfinder Mission, the IRG has delivered
tools providing these capabilities to the science operations teams
of the :term:`MPL` mission, the :term:`MER` mission, the :term:`MRO`
mission, and the :term:`LRO` mission. A critical component technology
enabling this work is the ASP. The Stereo Pipeline generates high
quality, dense, texture-mapped 3D surface models from stereo image
pairs. In addition, ASP provides tools to perform many other
cartography tasks including map projection, point cloud and DEM
registration, automatic registration of cameras, data format
conversion, and data visualization.
Although initially developed for ground control and scientific
visualization applications, the Stereo Pipeline has evolved to address
orbital stereogrammetry and cartographic applications. In particular,
long-range mission planning requires detailed knowledge of planetary
topography, and high resolution topography is often derived from stereo
pairs captured from orbit. Orbital mapping satellites are sent as
precursors to planetary bodies in advance of landers and rovers. They
return a wealth of images and other data that helps mission planners and
scientists identify areas worthy of more detailed study. Topographic
information often plays a central role in this planning and analysis
process.
Our recent development of the Stereo Pipeline coincides with a
period of time when NASA orbital mapping missions are returning
orders of magnitude more data than ever before. Data volumes from
the Mars and Lunar Reconnaissance Orbiter missions now measure in
the tens of terabytes. There is growing consensus that existing
processing techniques, which are still extremely human intensive
and expensive, are no longer adequate to address the data processing
needs of NASA and the Planetary Science community. To pick an example
of particular relevance, the :term:`HiRISE` instrument has captured
a few thousand stereo pairs. Of these, only about two hundred stereo
pairs have been processed to date; mostly on human-operated, high-end
photogrammetric workstations. It is clear that much more value could
be extracted from this valuable raw data if a more streamlined,
efficient process could be developed.
The Stereo Pipeline was designed to address this very need. By applying
recent advances in computer vision, we have created an *automated*
process that is capable of generating high quality DTMs with minimal human
intervention. Users of the Stereo Pipeline can expect to spend some time
picking a handful of settings when they first start processing a new
type of image, but once this is done, the Stereo Pipeline can be used to
process tens, hundreds, or even thousands of stereo pairs without
further adjustment. With the release of this software, we hope to
encourage the adoption of this tool chain at institutions that run and
support these remote sensing missions. Over time, we hope to see this
tool incorporated into ground data processing systems alongside other
automated image processing pipelines. As this tool continues to mature,
we believe that it will be capable of producing digital elevation models
of exceptional quality without any human intervention.
Human vs. Computer: When to Choose Automation?
----------------------------------------------
When is it appropriate to choose automated stereo mapping over the use
of a conventional, human-operated photogrammetric workstation? This is a
philosophical question with an answer that is likely to evolve over the
coming years as automated data processing technologies become more
robust and widely adopted. For now, our opinion is that you should
*always* rely on human-guided, manual data processing techniques for
producing mission critical data products for missions where human lives
or considerable capital resources are at risk. In particular, maps for
landing site analysis and precision landing absolutely require the
benefit of an expert human operator to eliminate obvious errors in the
DEMs, and also to guarantee that the proper procedures have been
followed to correct satellite telemetry errors so that the data have the
best possible geodetic control.
When it comes to using DTMs for scientific analysis, both techniques have
their merits. Human-guided stereo reconstruction produces DTMs of
unparalleled quality that benefit from the intuition and experience of
an expert. The process of building and validating these DTMs is
well-established and accepted in the scientific community.
However, only a limited number of DTMs can be processed to this level of
quality. For the rest, automated stereo processing can be used to
produce DTMs at a fraction of the cost. The results are not necessarily less
accurate than those produced by the human operator, but they will not
benefit from the same level of scrutiny and quality control. As such,
users of these DTMs must be able to identify potential issues, and be on the
lookout for errors that may result from the improper use of these tools.
We recommend that all users of the Stereo Pipeline take the time to
thoroughly read this documentation and build an understanding of how
stereo reconstruction and bundle adjustment can be best used together to
produce high quality results. You are welcome to contact us if you have
any questions (:numref:`get-help`).
Software foundations
--------------------
NASA Vision Workbench
~~~~~~~~~~~~~~~~~~~~~
The Stereo Pipeline is built upon the Vision Workbench software which is
a general purpose image processing and computer vision library also
developed by the IRG. Some of the tools discussed in this document are
actually Vision Workbench programs, and any distribution of the Stereo
Pipeline requires the Vision Workbench. This distinction is important
only if compiling this software.
The USGS Integrated Software for Imagers and Spectrometers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For processing non-terrestrial NASA satellite images, Stereo Pipeline
must be installed alongside a copy of the Integrated Software for
Imagers and Spectrometers (:term:`ISIS`). ISIS is however not required for
processing terrestrial images (DigitalGlobe/Maxar WorldView, etc.).
ISIS is widely used in the planetary science community for processing raw
spacecraft images into high level data products of scientific interest
such as map-projected and mosaicked images
:cite:`2004LPI.35.2039A,1997LPI.28.387G,ISIS_website`.
We chose ISIS because (1) it is widely adopted by the planetary science
community, (2) it contains the authoritative collection of geometric
camera models for planetary remote sensing instruments, and (3) it is
open source software that is easy to leverage.
By installing the Stereo Pipeline, you will be adding an advanced stereo
image processing capability that can be used in your existing ISIS workflow.
The Stereo Pipeline supports the ISIS cube (``.cub``) file format, and can
make use of the camera models and ancillary information (i.e. SPICE
kernels) for imagers on many NASA spacecraft. The use of this single
standardized set of camera models ensures consistency between products
generated in the Stereo Pipeline and those generated by ISIS. Also by
leveraging ISIS camera models, the Stereo Pipeline can process stereo pairs
captured by just about any NASA mission.
.. _get-help:
Getting help and reporting bugs
-------------------------------
All bugs, feature requests, and general discussion should be posted on
the ASP support forum:
https://groups.google.com/forum/#!forum/ames-stereo-pipeline-support
To contact the developers and project manager directly, send an email
to:
stereo-pipeline-owner@lists.nasa.gov
When you submit a bug report, it may be helpful to attach the logs
output by ``parallel_stereo`` and other tools (:numref:`logging`).
Typographical conventions
-------------------------
Names of programs that are meant to be run on the command line are
written in a constant-width font, like the ``parallel_stereo`` program, as are
options to those programs.
An indented line of constant-width text can be typed into your terminal;
these lines will either begin with a '``>``' to denote a regular shell,
or with '``ISIS>``' which denotes an ISIS-enabled shell (which means you have
to set the ``ISISROOT`` environment variable and have sourced the appropriate
ISIS startup script, as detailed in the ISIS instructions).
::
> ls
ISIS> pds2isis
Constant-width text enclosed in greater-than and less-than brackets denotes an
option or argument that a user will need to supply. For example,
'``stereo E0201461.map.cub M0100115.map.cub out``' is specific, but
'``stereo <first_image> <second_image> out``' indicates that ``<first_image>``
and ``<second_image>`` are not the names of specific files, but dummy
parameters which need to be replaced with actual file names.
Square brackets denote optional options or values to a command, and
items separated by a vertical bar are either aliases for each other, or
different, specific options. Default arguments or other notes are
enclosed by parentheses, and line continuation with a backslash::
point2dem [-h|--help] [-r moon|mars] [-s <resolution>] \
[-o