Repository: dgrzech/sobfu
Branch: master
Commit: c83646582a14
Files: 73
Total size: 409.5 KB
Directory structure:
gitextract_kh2b_hh_/
├── .clang-format
├── .gitignore
├── .gitlab-ci.yml
├── .lintflags
├── .lintignore
├── CMakeLists.txt
├── LICENSE.md
├── README.md
├── cmake/
│ ├── Targets.cmake
│ └── Utils.cmake
├── include/
│ ├── kfusion/
│ │ ├── cuda/
│ │ │ ├── device.hpp
│ │ │ ├── device_array.hpp
│ │ │ ├── device_memory.hpp
│ │ │ ├── imgproc.hpp
│ │ │ ├── kernel_containers.hpp
│ │ │ ├── marching_cubes.hpp
│ │ │ ├── projective_icp.hpp
│ │ │ ├── temp_utils.hpp
│ │ │ ├── texture_binder.hpp
│ │ │ └── tsdf_volume.hpp
│ │ ├── exports.hpp
│ │ ├── internal.hpp
│ │ ├── kinfu.hpp
│ │ ├── precomp.hpp
│ │ ├── safe_call.hpp
│ │ └── types.hpp
│ └── sobfu/
│ ├── cuda/
│ │ └── utils.hpp
│ ├── params.hpp
│ ├── precomp.hpp
│ ├── reductor.hpp
│ ├── scalar_fields.hpp
│ ├── sob_fusion.hpp
│ ├── solver.hpp
│ └── vector_fields.hpp
├── params/
│ ├── params_advent.ini
│ ├── params_boxing.ini
│ ├── params_hat.ini
│ ├── params_ours.ini
│ ├── params_snoopy.ini
│ └── params_umbrella.ini
├── setup.sh
├── src/
│ ├── CMakeLists.txt
│ ├── apps/
│ │ ├── CMakeLists.txt
│ │ └── demo.cpp
│ ├── kfusion/
│ │ ├── CMakeLists.txt
│ │ ├── core.cpp
│ │ ├── cuda/
│ │ │ ├── imgproc.cu
│ │ │ ├── marching_cubes.cu
│ │ │ ├── proj_icp.cu
│ │ │ └── tsdf_volume.cu
│ │ ├── device_memory.cpp
│ │ ├── imgproc.cpp
│ │ ├── kinfu.cpp
│ │ ├── marching_cubes.cpp
│ │ ├── precomp.cpp
│ │ ├── projective_icp.cpp
│ │ └── tsdf_volume.cpp
│ └── sobfu/
│ ├── CMakeLists.txt
│ ├── cuda/
│ │ ├── reductor.cu
│ │ ├── scalar_fields.cu
│ │ ├── solver.cu
│ │ └── vector_fields.cu
│ ├── precomp.cpp
│ ├── reductor.cpp
│ ├── scalar_fields.cpp
│ ├── sob_fusion.cpp
│ ├── solver.cpp
│ └── vector_fields.cpp
└── test/
├── CMakeLists.txt
├── deformation_field_test.cpp
├── main.cpp
├── reductions_test.cpp
└── solver_test.cpp
================================================
FILE CONTENTS
================================================
================================================
FILE: .clang-format
================================================
---
# ---------------------------------------------------------------------------- #
# CLANG-FORMAT #
# v6.0.x #
# ---------------------------------------------------------------------------- #
# For documentation: https://clang.llvm.org/docs/ClangFormatStyleOptions.html
#
Language: Cpp
# BasedOnStyle: Google
AccessModifierOffset: -4
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: true
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: true
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: true
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
ColumnLimit: 120
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: true
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
IncludeCategories:
- Regex: '^<.*\.h>'
Priority: 1
- Regex: '^<.*'
Priority: 2
- Regex: '.*'
Priority: 3
IndentCaseLabels: true
IndentWidth: 4
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: false
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 8
UseTab: Never
...
================================================
FILE: .gitignore
================================================
# Created by https://www.gitignore.io/api/vim,osx,c++
/data
### C++ ###
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
### OSX ###
*.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
### Vim ###
# swap
[._]*.s[a-v][a-z]
[._]*.sw[a-p]
[._]s[a-v][a-z]
[._]sw[a-p]
# session
Session.vim
# temporary
.netrwhist
*~
# auto-generated tag files
tags
# End of https://www.gitignore.io/api/vim,osx,c++
# Ignore build directory
build
files
# ignore remote sync config file
.remote-sync.json
# Ignore frames
frames
# ignore helper MATLAB script to display point clouds
show_pcl.m
# ignore output folder
out
================================================
FILE: .gitlab-ci.yml
================================================
# ---------------------------------------------------------------------------- #
# GITLAB-RUNNER-CI #
# ---------------------------------------------------------------------------- #
# Version: v9.5.4
# Running on graphic01.doc.ic.ac.uk
# ---------------------------------------------------------------------------- #
# PIPELINE #
# ---------------------------------------------------------------------------- #
# The CI pipeline is divided into the following stages:
# Lint: Performs linter checks on the repo's code
# Build: Builds the project
# Test: Runs the project's tests
# Deploy: Deploys the project to a given directory
# Cleanup: Removes all artifacts required for the CI stages
stages:
- Setup
- Lint
- Build
- Test
- Deploy
- Cleanup
# env_setup defines a number of environment variables which can be called by
# various before_scripts of the project.
# 1) Exporting the PATH is necessary to enable linking of the custom installed
# binaries in the group's shared directory.
# 2) setup.sh makes sure all Cuda related binaries are setup correctly.
# This includes, but isn't limited to, CUDA itself and OpenGL.
.env_setup: &env_setup |
source setup.sh --set-paths
# Variables for use in script
variables:
CONTAINER_IMAGE: g1736211/dynfu
DEV_IMAGE: g1736211/dynfu:dev
TAG_IMAGE: g1736211/dynfu:${CI_COMMIT_REF_SLUG}
# ---------------------------------------------------------------------------- #
# SETUP #
# ---------------------------------------------------------------------------- #
# The Setup stage sets all the environment variables and paths using cmake.
# > cmake <
# Cmake will generate a Makefile for the project linking all needed dependencies
# Cmake stage is required to not fail, in which case it would fail the pipeline.
cmake:
stage: Setup
# Before attempting to cmake env variables must be set to link the binaries
# Call .setup.sh which creates build and setup the dependencies
# Cmake will also build inside the build/ directory
before_script:
- rm -rf build || true
- source setup.sh --set-paths
# Build/ directory is cached for a given commit SHA to make sure all the tasks
# within a same pipeline will have access to the correctly built project
cache:
key: "$CI_COMMIT_SHA"
paths:
- build/
# Cmake requires two additional flags:
# 1) CMAKE_PREFIX enables it to find the custom built binaries
# 2) CMAKE_CXX_FLAGS are necessary to be set as otherwise BASH doesn't seem to
# set the c++ standard correctly (i.e. will throw a number of errors)
# 3) [Others]
script:
- source setup.sh --cmake -i
# ---------------------------------------------------------------------------- #
# LINT #
# ---------------------------------------------------------------------------- #
# The Lint stage runs the linter on the project's files.
# > clang_format <
# Clang formatting will ensure that the latest files have been formatted with
# clang-styling. This step does not fix linter errors, it will just point them
# out to the pipeline, failing it.
clang_format:
stage: Lint
# Set env variables before testing should we need to access CUDA or OpenGL
# headers.
before_script:
- *env_setup
# Files are considered to be source only if *.cpp or *.hpp.
# Excluded files or directories must be set manually in .lintignore
script:
# Print out the configuration file variables
- /vol/project/2017/362/g1736211/bin/clang-format -dump-config
# Attempt to check if clang-format had been run before saving
- ./scripts/.format_bash
# > lint <
# The linter used for the project is clang-tidy.
# 1) Linting stage must run after build due to the numerous dependencies created
# by Cmake. All these dependencies are logged into compile_commands.json and
# are referenced by clang-tidy when linting (i.e. to scan headers).
# 2) The run-clang-tidy python script must be linked to the relevant clang-tidy
# executable as it cannot be found in it's canonical place (i.e /usr/bin).
# 3) run-clang-tidy.py will accept all clang-tidy flags, it has extra value as
# it can use the compile_commands.json information.
#
# USED FLAGS:
# 1) -list-checks : Lists all the enabled checks.
# 2) -checks= : Sets different checkstyles for the source files
# 3) -p                    : Sets the path for the build directory containing the
#                            compile_commands.json information.
# 4) -warnings-as-errors=* : Treats all warnings as errors.
#
# Used checks can be found in .lintflags
clang_tidy:
stage: Lint
# Set env variables before testing should we need to access CUDA or OpenGL
# headers.
before_script:
- *env_setup
# Linting must occur on the newly built build/ binaries.
cache:
key: "$CI_COMMIT_SHA"
paths:
- build/
policy: pull
# Files are considered to be source only if *.cpp or *.hpp.
# Excluded files or directories must be set manually in .lintignore
script:
# Print out the list of enabled checks
- /vol/project/2017/362/g1736211/bin/clang-tidy `./scripts/.flags_bash` -list-checks
# Print out the list of files which will be checked for linting
- ./scripts/.find_bash
# Perform the linting with the above configurations
- /vol/project/2017/362/g1736211/bin/clang-tidy -p build `./scripts/.diff_tidy_bash` `./scripts/.flags_bash` `./scripts/.find_bash`
# ---------------------------------------------------------------------------- #
# BUILD #
# ---------------------------------------------------------------------------- #
# The build project actually generates the binaries for the project.
# According to the flags set during the CMake stage, tests are built as well in
# their respective directories.
# > make <
# The make stage will run the Makefile generated by cmake in the build/ dir.
# The make stage is required not to fail.
make:
stage: Build
# Before attempting to make env variables must be set to make sure all
# binaries referenced by the Makefile can be accessed.
# Make will also build the executables inside the build/ directory
before_script:
- *env_setup
# Build/ directory is cached for a given commit SHA to make sure all the tasks
# within a same pipeline will have access to the correctly built project
cache:
key: "$CI_COMMIT_SHA"
paths:
- build/
# Run simple make from the Makefile on four processes.
script:
- source setup.sh --make
# > docker_image_build <
# The docker_image_build stage creates the Docker image for an EC2 instance
# on a docker-compatible runner
docker_image_build:
stage: Build
tags:
- docker
script:
- docker build --build-arg CUDA_GENERATION=Maxwell -t $TAG_IMAGE .
except:
- master
# > docker_image_build_master <
# The docker_image_build_master stage creates the Docker image for all architectures
# on a docker-compatible runner
docker_image_build_master:
stage: Build
tags:
- docker
script:
- docker build --build-arg CUDA_GENERATION=Auto -t $TAG_IMAGE .
only:
- master
# ---------------------------------------------------------------------------- #
# TEST #
# ---------------------------------------------------------------------------- #
# The Test stage will perform a number of tests on the Project.
# > gtest <
# The gtest stage runs the GTest suite present within the library.
# GTest binaries can be found in the group shared directory.
gtest:
stage: Test
# Set env variables before testing should we need to access CUDA or OpenGL
# headers.
before_script:
- *env_setup
# Testing must occur on the newly built build/ binaries.
cache:
key: "$CI_COMMIT_SHA"
paths:
- build/
policy: pull
# Run the gtests
script:
- ./build/bin/dynfu_test
# > coverage <
# The coverage stage runs gcov (and on top of that lcov) to generate a coverage
# report of the test for the source files.
coverage:
stage: Test
# Set env variables before testing should we need to access CUDA or OpenGL
# headers.
before_script:
- *env_setup
# Coverage report is constructed for the newly build binaries
cache:
key: "$CI_COMMIT_SHA"
paths:
- build/
policy: pull
# Generate the coverage report
script:
- source setup.sh --coverage
# ---------------------------------------------------------------------------- #
# DEPLOY #
# ---------------------------------------------------------------------------- #
# The Deploy stage deploys newly built and tested binaries to a given location
# on disk.
# > deploy <
# The deploy stage will deploy the newly built project binaries
# to /data/dynfu.
deploy:
stage: Deploy
# The latest build build/ must be retrieved from cache
cache:
key: "$CI_COMMIT_SHA"
paths:
- build/
policy: pull
# Clean the output distributed library directory to clone new data into it
before_script:
- rm -rf /data/dynfu
- mkdir /data/dynfu
# Copy the latest build BINARIES to /data/
script:
- cp -R build/bin/* /data/dynfu
# > docker_image__deploy_dev <
# Deploys to Docker Hub
docker_image_deploy_dev:
stage: Deploy
tags:
- docker
script:
- docker login -u $DOCKER_HUB_LOGIN -p $DOCKER_HUB_PASSWORD
- docker tag $TAG_IMAGE $DEV_IMAGE
- docker push $DEV_IMAGE
only:
- dev
# > docker_image_deploy <
# Deploys to Docker Hub
docker_image_deploy:
stage: Deploy
tags:
- docker
script:
- docker login -u $DOCKER_HUB_LOGIN -p $DOCKER_HUB_PASSWORD
- docker tag $TAG_IMAGE $CONTAINER_IMAGE
- docker push $CONTAINER_IMAGE
only:
- master
# > test_dynfu_on_ec2 <
# Trigger testing on ec2 instance
test_dynfu_on_ec2:
stage: Deploy
tags:
- docker
before_script:
- pip install awscli
- export PATH="${PATH}:~/.local/bin"
script:
- AWS_ACCESS_KEY_ID=${UPDATE_LAMBDA_KEY} AWS_SECRET_ACCESS_KEY=${UPDATE_LAMBDA_PASS} AWS_DEFAULT_REGION=eu-west-1 aws lambda invoke --function-name dynamic-fusion-trigger-testing /tmp/lambda-out
only:
- dev
# ---------------------------------------------------------------------------- #
# CLEANUP #
# ---------------------------------------------------------------------------- #
# The Cleanup stage makes sure the cache storage is cleaned for a given
# pipeline in the runners directories.
# > cache_cleanup <
# It runs regardless of failure or success, and thus has the aim of cleaning
# up the testing environment. In particular this script must remove unused
# cache artifacts that were stored within one pipeline.
cache_cleanup:
stage: Cleanup
script:
- echo "Present Cache:" && ls /home/gitlab-runner/cache/g1736211/dynfu/
- rm -rf "/home/gitlab-runner/cache/g1736211/dynfu/$CI_COMMIT_SHA"
when: always
================================================
FILE: .lintflags
================================================
# ---------------------------------------------------------------------------- #
# FILE FORMAT #
# ---------------------------------------------------------------------------- #
# Available checks can be found at http://clang.llvm.org/extra/clang-tidy/ #
# To add a new check add it normally #
# To PREVENT the linter from using a check, prefix with '-' #
# Extra links: https://clang.llvm.org/extra/clang-tidy/checks/list.html #
# Clang-tidy docs: https://clang.llvm.org/docs/ClangFormat.html #
# ---------------------------------------------------------------------------- #
# Enforce google standard
google-*
# Use High Integrity C++ Coding Standard
hicpp-*
# Use LLVM Coding Standard
llvm-*
# Use all miscellaneous flags
misc-*
# Apply readability constraints
readability-*
# Apply bugprone checks
bugprone-*
# ---------------------------------------------------------------------------- #
# The following are check DISABLES for certain behaviours
# Allow un-use of use auto
-hicpp-use-auto
# Allow string comparison .compare and not only via == or !=
-misc-string-compare
# Allow warnings for signed bitwise operators
-hicpp-signed-bitwise
# Allow printfs calls in C style
-hicpp-vararg
# Allow implicit bool casts and conversions
-readability-implicit-bool-cast
-readability-implicit-bool-conversion
# Prevent conflicting standards
-llvm-include-order
# Deprecated checks
-hicpp-special-member-functions
-hicpp-use-equals-delete
-hicpp-no-array-decay
-readability-delete-null-pointer
-hicpp-member-init
================================================
FILE: .lintignore
================================================
# Ignore the test directory when linting
./tests/*
# Ignore all git files when linting
./.git/*
# Ignore all scripts
./scripts/*
# Ignore the cmake directory
./cmake/*
# Ignore all contents of the build directory
./build/*
================================================
FILE: CMakeLists.txt
================================================
# ---------------------------------------------------------------------------- #
# CMAKE INIT CONFIGURATIONS
# ---------------------------------------------------------------------------- #
cmake_minimum_required(VERSION 2.8.8)
# NOTE(review): 2.8.8 is very old; several commands below (e.g. find_package(CUDA),
# upper-case SET/LIST) are deprecated in modern CMake — consider raising the minimum.
# ---------------------------------------------------------------------------- #
# SET THE PROJECT
# ---------------------------------------------------------------------------- #
project(kfusion C CXX)
# ---------------------------------------------------------------------------- #
# UTILITY FUNCTIONS (MACROS) AND PATHS
# ---------------------------------------------------------------------------- #
LIST(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/")
include(Utils)
include(Targets)
# ---------------------------------------------------------------------------- #
# CMAKE SETTINGS
# ---------------------------------------------------------------------------- #
# Use both Debug and Release versions
SET(CMAKE_CONFIGURATION_TYPES "Debug;Release")
# Set CXX to compile with C++11 standard
# NOTE(review): -fpermissive downgrades conformance errors to warnings and can mask
# real bugs — confirm it is still required by the dependencies before keeping it.
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -MP -MD -pthread -fpermissive")
# ---------------------------------------------------------------------------- #
# DEPENDENCIES
# ---------------------------------------------------------------------------- #
find_package(Boost 1.58.0 REQUIRED COMPONENTS filesystem program_options)
find_package(CUDA REQUIRED)
find_package(OpenCV REQUIRED COMPONENTS core viz highgui calib3d)
find_package(PCL 1.8 REQUIRED)
find_package(VTK REQUIRED)
# ---------------------------------------------------------------------------- #
# CUDA SETTINGS
# ---------------------------------------------------------------------------- #
SET(HAVE_CUDA 1)
# NOTE(review): the gencode pair is hard-coded for compute capability 6.1 (Pascal,
# e.g. GTX 10xx); building for other GPU generations requires changing these flags.
# --ftz/--prec-div=false/--prec-sqrt=false trade float accuracy for speed.
LIST(APPEND CUDA_NVCC_FLAGS
"-gencode;arch=compute_61,code=sm_61;
--ftz=true;
--prec-div=false;
--prec-sqrt=false;
--expt-relaxed-constexpr"
)
if(UNIX OR APPLE)
LIST(APPEND CUDA_NVCC_FLAGS "-Xcompiler;-fPIC")
endif()
# ---------------------------------------------------------------------------- #
# SET INCLUDE PATHS
# ---------------------------------------------------------------------------- #
include_directories(include
${Boost_INCLUDE_DIRS}
${CUDA_INCLUDE_DIRS}
${OpenCV_INCLUDE_DIRS}
${PCL_INCLUDE_DIRS}
${VTK_INCLUDE_DIRS}
)
# ---------------------------------------------------------------------------- #
# BUILD SRC DIRECTORY
# ---------------------------------------------------------------------------- #
add_subdirectory(src)
# ---------------------------------------------------------------------------- #
# BUILD TESTS
# ---------------------------------------------------------------------------- #
# Tests are only built when the project is configured with -DBUILD_TESTS=ON.
if(BUILD_TESTS)
enable_testing()
add_subdirectory(test)
endif()
================================================
FILE: LICENSE.md
================================================
Copyright (c) 2012, Anatoly Baksheev
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================
FILE: README.md
================================================
sobfu
============
software for 3d reconstruction of non-rigidly deforming scenes using depth data, based on sobolevfusion
dependencies
-----------
* boost
* nvidia gpu with cuda >8.0
* opencv
* pcl 1.8.1
* vtk
installation
------------
to install, run `source setup.sh -a`
usage
------------
`./build/bin/app /path/to/data /path/to/params <--enable-viz> <--enable-log> <--verbose>`
* the data directory must include folders `color` and `depth`
* the folder `params` has files with recommended parameters for some of the scenes from volumedeform and killingfusion datasets
### options
* with `--enable-viz`, screenshots from the pcl viewer will be logged in .png format to `/path/to/data/screenshots`
* with `--enable-log`, meshes computed from the model tsdf's via marching cubes will be logged in .vtk format to `/path/to/data/meshes`
* `--verbose` and `--vverbose` control verbosity of the solver
example reconstructions
------------
reconstructions of some of the scenes from the volumedeform and killingfusion projects can be viewed on our [youtube playlist](http://bit.ly/sobfu-yt)
there is a large trade-off between frame rate and reconstruction quality--sample reconstructions ran at approx. 2fps
issues
------------
* drift in longer scenes due to less than perfect registration
* topological changes
references
------------
```
@InProceedings{Slavcheva_2018_CVPR,
author = {Slavcheva, Miroslava and Baust, Maximilian and Ilic, Slobodan},
title = {SobolevFusion: 3D Reconstruction of Scenes Undergoing Free Non-Rigid Motion},
booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2018}
}
```
================================================
FILE: cmake/Targets.cmake
================================================
################################################################################################
# short command to setup source group
# Collect the files matching the GLOB pattern(s) passed after the group name and
# register them as an IDE source group.
function(kf_source_group group)
    cmake_parse_arguments(VW_SOURCE_GROUP "" "" "GLOB" ${ARGN})
    file(GLOB srcs ${VW_SOURCE_GROUP_GLOB})
    source_group(${group} FILES ${srcs})
endfunction()
################################################################################################
# short command for declaring includes from other modules
# For every module name passed in, adds "<source-root>/modules/<module>/include"
# to the include search path, skipping modules that do not ship headers.
# Implemented as a macro, so the working variables are explicitly unset at the end
# to avoid leaking them into the caller's scope.
macro(declare_deps_includes)
foreach(__arg ${ARGN})
get_filename_component(___path "${CMAKE_SOURCE_DIR}/modules/${__arg}/include" ABSOLUTE)
# Only add the directory when it actually exists.
if (EXISTS ${___path})
include_directories(${___path})
endif()
endforeach()
unset(___path)
unset(__arg)
endmacro()
################################################################################################
# short command for setting default target properties
# Applies the project-wide default target properties: a "d" postfix for debug
# builds and common archive/runtime output directories. Non-test targets also get
# an install rule into the install prefix root.
function(default_properties target)
    set_target_properties(${target} PROPERTIES
        DEBUG_POSTFIX "d"
        ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib"
        RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin")
    if (NOT ${target} MATCHES "^test_")
        # BUG FIX: the install rule previously referenced the undefined variable
        # ${the_target}, so nothing was ever installed; it must use the function
        # argument ${target}.
        install(TARGETS ${target} RUNTIME DESTINATION ".")
    endif()
endfunction()
# Place test targets in the "Tests" IDE folder when project folders are enabled.
function(test_props target)
    if(USE_PROJECT_FOLDERS)
        set_target_properties(${target} PROPERTIES FOLDER "Tests")
    endif()
endfunction()
# Place application targets in the "Apps" IDE folder when project folders are enabled.
function(app_props target)
    if(USE_PROJECT_FOLDERS)
        set_target_properties(${target} PROPERTIES FOLDER "Apps")
    endif()
endfunction()
################################################################################################
# short command for setting default target properties
# NOTE(review): this is a verbatim re-definition of default_properties (an identical
# copy appears earlier in this file); CMake silently keeps the last definition, so
# one of the two copies should be deleted.
# Applies the project-wide default target properties: a "d" postfix for debug
# builds and common archive/runtime output directories. Non-test targets also get
# an install rule into the install prefix root.
function(default_properties target)
    set_target_properties(${target} PROPERTIES
        DEBUG_POSTFIX "d"
        ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib"
        RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin")
    if (NOT ${target} MATCHES "^test_")
        # BUG FIX: the install rule previously referenced the undefined variable
        # ${the_target}, so nothing was ever installed; it must use the function
        # argument ${target}.
        install(TARGETS ${target} RUNTIME DESTINATION ".")
    endif()
endfunction()
################################################################################################
# short command for adding library module
# Builds a static CUDA library named <name> from every *.cpp/*.cu file below the
# current source directory and applies the project's default target properties.
# The kfusion API export symbol is defined so the library's public symbols are
# exported (as a target property on MSVC, as a global definition elsewhere).
macro(add_module_library name)
set(module_name ${name})
FILE(GLOB_RECURSE sources *.cpp *.cu)
cuda_add_library(${module_name} STATIC ${sources})
if(MSVC)
set_target_properties(${module_name} PROPERTIES DEFINE_SYMBOL KFUSION_API_EXPORTS)
else()
add_definitions(-DKFUSION_API_EXPORTS)
endif()
default_properties(${module_name})
endmacro()
################################################################################################
# short command for adding application module
# Build an executable from every *.cpp below the current directory and apply the
# project-wide default target properties.
macro(add_application target)
    file(GLOB_RECURSE sources *.cpp)
    add_executable(${target} ${sources})
    default_properties(${target})
endmacro()
################################################################################################
# short command for adding test target
# Build a test executable from every *.cpp in the current directory, link it
# against gtest/gmock, and register it with CTest under its own name.
macro(CREATE_TEST target)
    file(GLOB SRCS *.cpp)
    add_executable(${target} ${SRCS})
    target_link_libraries(${target} libgtest libgmock)
    add_test(NAME ${target} COMMAND ${target})
    default_properties(${target})
endmacro()
================================================
FILE: cmake/Utils.cmake
================================================
################################################################################################
# short alias for CMake logging
# Short alias for CMake STATUS logging; the "<<" prefix makes these messages easy
# to spot/grep in the configure output.
function(dmsg)
message(STATUS "<<${ARGN}")
endfunction()
################################################################################################
# Command checking if current module has cuda souces to compile
# Sets ${var} to ON iff HAVE_CUDA is set and the current module has at least one
# src/cuda/*.cu source file; otherwise OFF. Implemented as a macro so that
# set(${var} ...) takes effect in the caller's scope; the working variables are
# unset afterwards so they do not leak.
macro(check_cuda var)
file(GLOB cuda src/cuda/*.cu)
list(LENGTH cuda ___size)
if (HAVE_CUDA AND ___size GREATER 0)
set(${var} ON)
else()
set(${var} OFF)
endif()
unset(___size)
unset(cuda)
endmacro()
################################################################################################
# short command for adding library module
# Disables the given compiler warnings in the given flag variables unless
# ENABLE_NOISY_WARNINGS is set. Arguments are classified by prefix:
#   CMAKE_*  -> names of flag variables to edit (e.g. CMAKE_CXX_FLAGS)
#   /wd*     -> MSVC warning-disable switches
#   -W*      -> GCC-style warning switches (converted to their -Wno- form)
# NOTE(review): relies on CV_COMPILER_IS_GNU and ocv_check_flag_support, which come
# from OpenCV's build scripts — confirm they are defined in this project's CMake
# context before calling the GCC branch.
macro(warnings_disable)
if(NOT ENABLE_NOISY_WARNINGS)
set(_flag_vars "")
set(_msvc_warnings "")
set(_gxx_warnings "")
# Sort the arguments into flag variables, MSVC warnings, and GCC warnings.
foreach(arg ${ARGN})
if(arg MATCHES "^CMAKE_")
list(APPEND _flag_vars ${arg})
elseif(arg MATCHES "^/wd")
list(APPEND _msvc_warnings ${arg})
elseif(arg MATCHES "^-W")
list(APPEND _gxx_warnings ${arg})
endif()
endforeach()
if(MSVC AND _msvc_warnings AND _flag_vars)
# MSVC: simply append each /wd switch to every flag variable.
foreach(var ${_flag_vars})
foreach(warning ${_msvc_warnings})
set(${var} "${${var}} ${warning}")
endforeach()
endforeach()
elseif(CV_COMPILER_IS_GNU AND _gxx_warnings AND _flag_vars)
# GCC: strip any existing -Wfoo from the flags, rewrite it as -Wno-foo, and
# append it only if the compiler supports that flag.
foreach(var ${_flag_vars})
foreach(warning ${_gxx_warnings})
if(NOT warning MATCHES "^-Wno-")
string(REPLACE "${warning}" "" ${var} "${${var}}")
string(REPLACE "-W" "-Wno-" warning "${warning}")
endif()
ocv_check_flag_support(${var} "${warning}" _varname)
if(${_varname})
set(${var} "${${var}} ${warning}")
endif()
endforeach()
endforeach()
endif()
# Clean up macro-local working variables so they do not leak to the caller.
unset(_flag_vars)
unset(_msvc_warnings)
unset(_gxx_warnings)
endif(NOT ENABLE_NOISY_WARNINGS)
endmacro()
################################################################################################
# Command for setting options with some preconditions
# Declares a cached option(${variable}) with the given description and default
# value, optionally guarded by a condition: everything after an "IF"/"if" token in
# ${ARGN} is collected as the condition. If the condition is false, the option is
# removed from the cache instead of being declared.
macro(kf_option variable description value)
set(__value ${value})
set(__condition "")
# __varname selects which list the remaining args are appended to: the default
# value (before "IF") or the condition (after it).
set(__varname "__value")
foreach(arg ${ARGN})
if(arg STREQUAL "IF" OR arg STREQUAL "if")
set(__varname "__condition")
else()
list(APPEND ${__varname} ${arg})
endif()
endforeach()
unset(__varname)
# No condition given: substitute one that is always true.
if("${__condition}" STREQUAL "")
set(__condition 2 GREATER 1)
endif()
if(${__condition})
if("${__value}" MATCHES ";")
# The default is itself a list/expression: evaluate it to ON or OFF.
if(${__value})
option(${variable} "${description}" ON)
else()
option(${variable} "${description}" OFF)
endif()
elseif(DEFINED ${__value})
# The default names another variable: use that variable's truthiness.
if(${__value})
option(${variable} "${description}" ON)
else()
option(${variable} "${description}" OFF)
endif()
else()
# Plain literal default.
option(${variable} "${description}" ${__value})
endif()
else()
# Condition failed: make sure a stale cached value does not linger.
unset(${variable} CACHE)
endif()
unset(__condition)
unset(__value)
endmacro()
================================================
FILE: include/kfusion/cuda/device.hpp
================================================
#pragma once
// NOTE(review): the targets of the three includes below were lost during extraction
// (the bracketed/quoted paths appear to have been stripped as markup); restore them
// from the original kfusion device.hpp — presumably the internal device headers
// (e.g. internal.hpp / temp_utils.hpp / safe_call.hpp) — TODO confirm upstream.
#include
#include
#include
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// TsdfVolume
// __kf_device__ kfusion::device::TsdfVolume::TsdfVolume(float2 *_data, int3 _dims, float3 _voxel_size,
// float _trunc_dist, float _max_weight)
// : data(_data), dims(_dims), voxel_size(_voxel_size), trunc_dist(_trunc_dist), max_weight(_max_weight) {}
//
// __kf_device__ kfusion::device::TsdfVolume::TsdfVolume(const TsdfVolume &other)
// : data(other.data),
// dims(other.dims),
// voxel_size(other.voxel_size),
// trunc_dist(other.trunc_dist),
// max_weight(other.max_weight) {}
/// Mutable access to the voxel at integer grid coordinates (x, y, z).
/// Voxels are stored x-fastest: linear index = x + dims.x * (y + dims.y * z).
__kf_device__ float2 *kfusion::device::TsdfVolume::operator()(int x, int y, int z) {
    return &data[x + dims.x * (y + dims.y * z)];
}
/// Read-only access to the voxel at integer grid coordinates (x, y, z);
/// same x-fastest layout as the mutable overload.
__kf_device__ const float2 *kfusion::device::TsdfVolume::operator()(int x, int y, int z) const {
    return &data[x + dims.x * (y + dims.y * z)];
}
__kf_device__ float2 *kfusion::device::TsdfVolume::beg(int x, int y) const { return data + x + dims.x * y; }
__kf_device__ float2 *kfusion::device::TsdfVolume::zstep(float2 *const ptr) const { return ptr + dims.x * dims.y; }
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Projector
/* Projects a camera-space 3-d point p onto the image plane with the pinhole
 * model: coo = f * (p.xy / p.z) + c, where f is the focal length and c the
 * principal point. Uses the fast approximate divide (__fdividef) and a fused
 * multiply-add (__fmaf_rn), so results may differ from exact IEEE arithmetic
 * by a few ulps. No guard against p.z == 0 -- callers must ensure p.z != 0. */
__kf_device__ float2 kfusion::device::Projector::operator()(const float3 &p) const {
float2 coo;
coo.x = __fmaf_rn(f.x, __fdividef(p.x, p.z), c.x);
coo.y = __fmaf_rn(f.y, __fdividef(p.y, p.z), c.y);
return coo;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Reprojector
/* Back-projects pixel (u, v) with depth z into a camera-space 3-d point using
 * the inverse focal lengths finv and the principal point c. */
__kf_device__ float3 kfusion::device::Reprojector::operator()(int u, int v, float z) const {
    return make_float3(z * (u - c.x) * finv.x, z * (v - c.y) * finv.y, z);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// Utility
namespace kfusion {
namespace device {
/* matrix * vector: each row of m (m.data[i]) is dotted with v */
__kf_device__ Vec3f operator*(const Mat3f &m, const Vec3f &v) {
return make_float3(dot(m.data[0], v), dot(m.data[1], v), dot(m.data[2], v));
}
/* affine transform of a point: rotate, then translate */
__kf_device__ Vec3f operator*(const Aff3f &a, const Vec3f &v) { return a.R * v + a.t; }
/* operator for multiplication of an affine transform by a float */
/* NOTE(review): only the translation part is scaled; the rotation is copied
 * unchanged. Presumably intentional for this codebase's usage -- confirm. */
__kf_device__ Aff3f operator*(const float &s, const Aff3f &a) {
Aff3f b;
b.R = a.R;
b.t = s * a.t;
return b;
}
/* 3-d matrix multiplication */
/* NOTE(review): this is NOT a standard matrix product. r.data[i] is the
 * ELEMENT-WISE product of row i of m with column i of n (operator*(float3,
 * float3) multiplies component-wise and the products are never summed). A
 * true product would require r.data[i].j = dot(m.data[i], column_j(n)).
 * Verify how callers use this before changing it. */
__kf_device__ Mat3f operator*(const Mat3f &m, const Mat3f &n) {
Mat3f r;
r.data[0] = m.data[0] * make_float3(n.data[0].x, n.data[1].x, n.data[2].x);
r.data[1] = m.data[1] * make_float3(n.data[0].y, n.data[1].y, n.data[2].y);
r.data[2] = m.data[2] * make_float3(n.data[0].z, n.data[1].z, n.data[2].z);
return r;
}
/* affine transform composition */
/* NOTE(review): the translation is composed as a.t + b.t, ignoring a.R. A
 * general composition is c.t = a.R * b.t + a.t; as written this is only
 * equivalent when a.R is the identity -- confirm intended usage. */
__kf_device__ Aff3f operator*(const Aff3f &a, const Aff3f &b) {
Aff3f c;
c.R = a.R * b.R;
c.t = a.t + b.t;
return c;
}
/* drops the w component of a float4 */
__kf_device__ Vec3f tr(const float4 &v) { return make_float3(v.x, v.y, v.z); }
/* binary summation functor (device-side stand-in for std::plus) */
struct plus {
__kf_device__ float operator()(float l, float r) const { return l + r; }
__kf_device__ double operator()(double l, double r) const { return l + r; }
};
} // namespace device
} // namespace kfusion
================================================
FILE: include/kfusion/cuda/device_array.hpp
================================================
#pragma once
#include
#include
#include
namespace kfusion {
namespace cuda {
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief @b DeviceArray class
*
* \note Typed container for GPU memory with reference counting.
*
* \author Anatoly Baksheev
*/
template
class KF_EXPORTS DeviceArray : public DeviceMemory {
public:
/** \brief Element type. */
typedef T type;
/** \brief Element size. */
enum { elem_size = sizeof(T) };
/** \brief Empty constructor. */
DeviceArray();
/** \brief Allocates internal buffer in GPU memory
* \param size_t: number of elements to allocate
* */
DeviceArray(size_t size);
/** \brief Initializes with user allocated buffer. Reference counting is
* disabled in this case. \param ptr: pointer to buffer \param size: elemens
* number
* */
DeviceArray(T *ptr, size_t size);
/** \brief Copy constructor. Just increments reference counter. */
DeviceArray(const DeviceArray &other);
/** \brief Assigment operator. Just increments reference counter. */
DeviceArray &operator=(const DeviceArray &other);
/** \brief Allocates internal buffer in GPU memory. If internal buffer was
* created before the function recreates it with new size. If new and old
* sizes are equal it does nothing. \param size: elemens number
* */
void create(size_t size);
/** \brief Decrements reference counter and releases internal buffer if
* needed. */
void release();
/** \brief Performs data copying. If destination size differs it will be
* reallocated. \param other_arg: destination container
* */
void copyTo(DeviceArray &other) const;
/** \brief Uploads data to internal buffer in GPU memory. It calls create()
* inside to ensure that intenal buffer size is enough. \param host_ptr_arg:
* pointer to buffer to upload \param size: elemens number
* */
void upload(const T *host_ptr, size_t size);
/** \brief Downloads data from internal buffer to CPU memory
* \param host_ptr_arg: pointer to buffer to download
* */
void download(T *host_ptr) const;
/** \brief Uploads data to internal buffer in GPU memory. It calls create()
* inside to ensure that intenal buffer size is enough. \param data: host
* vector to upload from
* */
template
void upload(const std::vector &data);
/** \brief Downloads data from internal buffer to CPU memory
* \param data: host vector to download to
* */
template
void download(std::vector &data) const;
/** \brief Performs swap of data pointed with another device array.
* \param other: device array to swap with
* */
void swap(DeviceArray &other_arg);
/** \brief Returns pointer for internal buffer in GPU memory. */
T *ptr();
/** \brief Returns const pointer for internal buffer in GPU memory. */
const T *ptr() const;
// using DeviceMemory::ptr;
/** \brief Returns pointer for internal buffer in GPU memory. */
operator T *();
/** \brief Returns const pointer for internal buffer in GPU memory. */
operator const T *() const;
/** \brief Returns size in elements. */
size_t size() const;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief @b DeviceArray2D class
*
* \note Typed container for pitched GPU memory with reference counting.
*
* \author Anatoly Baksheev
*/
template
class KF_EXPORTS DeviceArray2D : public DeviceMemory2D {
public:
/** \brief Element type. */
typedef T type;
/** \brief Element size. */
enum { elem_size = sizeof(T) };
/** \brief Empty constructor. */
DeviceArray2D();
/** \brief Allocates internal buffer in GPU memory
* \param rows: number of rows to allocate
* \param cols: number of elements in each row
* */
DeviceArray2D(int rows, int cols);
/** \brief Initializes with user allocated buffer. Reference counting is
* disabled in this case. \param rows: number of rows \param cols: number of
* elements in each row \param data: pointer to buffer \param stepBytes:
* stride between two consecutive rows in bytes
* */
DeviceArray2D(int rows, int cols, void *data, size_t stepBytes);
/** \brief Copy constructor. Just increments reference counter. */
DeviceArray2D(const DeviceArray2D &other);
/** \brief Assigment operator. Just increments reference counter. */
DeviceArray2D &operator=(const DeviceArray2D &other);
/** \brief Allocates internal buffer in GPU memory. If internal buffer was
* created before the function recreates it with new size. If new and old
* sizes are equal it does nothing. \param rows: number of rows to allocate
* \param cols: number of elements in each row
* */
void create(int rows, int cols);
/** \brief Decrements reference counter and releases internal buffer if
* needed. */
void release();
/** \brief Performs data copying. If destination size differs it will be
* reallocated. \param other: destination container
* */
void copyTo(DeviceArray2D &other) const;
/** \brief Uploads data to internal buffer in GPU memory. It calls create()
* inside to ensure that intenal buffer size is enough. \param host_ptr:
* pointer to host buffer to upload \param host_step: stride between two
* consecutive rows in bytes for host buffer \param rows: number of rows to
* upload \param cols: number of elements in each row
* */
void upload(const void *host_ptr, size_t host_step, int rows, int cols);
/** \brief Downloads data from internal buffer to CPU memory. User is
* resposible for correct host buffer size. \param host_ptr: pointer to host
* buffer to download \param host_step: stride between two consecutive rows in
* bytes for host buffer
* */
void download(void *host_ptr, size_t host_step) const;
/** \brief Performs swap of data pointed with another device array.
* \param other: device array to swap with
* */
void swap(DeviceArray2D &other_arg);
/** \brief Uploads data to internal buffer in GPU memory. It calls create()
* inside to ensure that intenal buffer size is enough. \param data: host
* vector to upload from \param cols: stride in elements between two
* consecutive rows for host buffer
* */
template
void upload(const std::vector &data, int cols);
/** \brief Downloads data from internal buffer to CPU memory
* \param data: host vector to download to
* \param cols: Output stride in elements between two consecutive rows for
* host vector.
* */
template
void download(std::vector &data, int &cols) const;
/** \brief Returns pointer to given row in internal buffer.
* \param y_arg: row index
* */
T *ptr(int y = 0);
/** \brief Returns const pointer to given row in internal buffer.
* \param y_arg: row index
* */
const T *ptr(int y = 0) const;
// using DeviceMemory2D::ptr;
/** \brief Returns pointer for internal buffer in GPU memory. */
operator T *();
/** \brief Returns const pointer for internal buffer in GPU memory. */
operator const T *() const;
/** \brief Returns number of elements in each row. */
int cols() const;
/** \brief Returns number of rows. */
int rows() const;
/** \brief Returns step in elements. */
size_t elem_step() const;
};
} // namespace cuda
namespace device {
using kfusion::cuda::DeviceArray;
using kfusion::cuda::DeviceArray2D;
} // namespace device
} // namespace kfusion
///////////////////// Inline implementations of DeviceArray
///////////////////////////////////////////////
template
inline kfusion::cuda::DeviceArray::DeviceArray() {}
template
inline kfusion::cuda::DeviceArray::DeviceArray(size_t size) : DeviceMemory(size * elem_size) {}
template
inline kfusion::cuda::DeviceArray::DeviceArray(T *ptr, size_t size) : DeviceMemory(ptr, size * elem_size) {}
template
inline kfusion::cuda::DeviceArray::DeviceArray(const DeviceArray &other) : DeviceMemory(other) {}
template
inline kfusion::cuda::DeviceArray &kfusion::cuda::DeviceArray::operator=(const DeviceArray &other) {
DeviceMemory::operator=(other);
return *this;
}
template
inline void kfusion::cuda::DeviceArray::create(size_t size) {
DeviceMemory::create(size * elem_size);
}
template
inline void kfusion::cuda::DeviceArray::release() {
DeviceMemory::release();
}
template
inline void kfusion::cuda::DeviceArray::copyTo(DeviceArray &other) const {
DeviceMemory::copyTo(other);
}
template
inline void kfusion::cuda::DeviceArray::upload(const T *host_ptr, size_t size) {
DeviceMemory::upload(host_ptr, size * elem_size);
}
template
inline void kfusion::cuda::DeviceArray::download(T *host_ptr) const {
DeviceMemory::download(host_ptr);
}
template
void kfusion::cuda::DeviceArray::swap(DeviceArray &other_arg) {
DeviceMemory::swap(other_arg);
}
template
inline kfusion::cuda::DeviceArray::operator T *() {
return ptr();
}
template
inline kfusion::cuda::DeviceArray::operator const T *() const {
return ptr();
}
template
inline size_t kfusion::cuda::DeviceArray::size() const {
return sizeBytes() / elem_size;
}
template
inline T *kfusion::cuda::DeviceArray::ptr() {
return DeviceMemory::ptr();
}
template
inline const T *kfusion::cuda::DeviceArray::ptr() const {
return DeviceMemory::ptr();
}
template
template
inline void kfusion::cuda::DeviceArray::upload(const std::vector &data) {
upload(&data[0], data.size());
}
template
template
inline void kfusion::cuda::DeviceArray::download(std::vector &data) const {
data.resize(size());
if (!data.empty())
download(&data[0]);
}
///////////////////// Inline implementations of DeviceArray2D
///////////////////////////////////////////////
template
inline kfusion::cuda::DeviceArray2D::DeviceArray2D() {}
template
inline kfusion::cuda::DeviceArray2D::DeviceArray2D(int rows, int cols) : DeviceMemory2D(rows, cols * elem_size) {}
template
inline kfusion::cuda::DeviceArray2D::DeviceArray2D(int rows, int cols, void *data, size_t stepBytes)
: DeviceMemory2D(rows, cols * elem_size, data, stepBytes) {}
template
inline kfusion::cuda::DeviceArray2D::DeviceArray2D(const DeviceArray2D &other) : DeviceMemory2D(other) {}
template
inline kfusion::cuda::DeviceArray2D &kfusion::cuda::DeviceArray2D::operator=(const DeviceArray2D &other) {
DeviceMemory2D::operator=(other);
return *this;
}
template
inline void kfusion::cuda::DeviceArray2D::create(int rows, int cols) {
DeviceMemory2D::create(rows, cols * elem_size);
}
template
inline void kfusion::cuda::DeviceArray2D::release() {
DeviceMemory2D::release();
}
template
inline void kfusion::cuda::DeviceArray2D::copyTo(DeviceArray2D &other) const {
DeviceMemory2D::copyTo(other);
}
template
inline void kfusion::cuda::DeviceArray2D::upload(const void *host_ptr, size_t host_step, int rows, int cols) {
DeviceMemory2D::upload(host_ptr, host_step, rows, cols * elem_size);
}
template
inline void kfusion::cuda::DeviceArray2D::download(void *host_ptr, size_t host_step) const {
DeviceMemory2D::download(host_ptr, host_step);
}
template
template
inline void kfusion::cuda::DeviceArray2D::upload(const std::vector &data, int cols) {
upload(&data[0], cols * elem_size, data.size() / cols, cols);
}
template
template
inline void kfusion::cuda::DeviceArray2D::download(std::vector &data, int &elem_step) const {
elem_step = cols();
data.resize(cols() * rows());
if (!data.empty())
download(&data[0], colsBytes());
}
template
void kfusion::cuda::DeviceArray2D::swap(DeviceArray2D &other_arg) {
DeviceMemory2D::swap(other_arg);
}
template
inline T *kfusion::cuda::DeviceArray2D::ptr(int y) {
return DeviceMemory2D::ptr(y);
}
template
inline const T *kfusion::cuda::DeviceArray2D::ptr(int y) const {
return DeviceMemory2D::ptr(y);
}
template
inline kfusion::cuda::DeviceArray2D::operator T *() {
return ptr();
}
template
inline kfusion::cuda::DeviceArray2D::operator const T *() const {
return ptr();
}
template
inline int kfusion::cuda::DeviceArray2D::cols() const {
return DeviceMemory2D::colsBytes() / elem_size;
}
template
inline int kfusion::cuda::DeviceArray2D::rows() const {
return DeviceMemory2D::rows();
}
template
inline size_t kfusion::cuda::DeviceArray2D::elem_step() const {
return DeviceMemory2D::step() / elem_size;
}
================================================
FILE: include/kfusion/cuda/device_memory.hpp
================================================
#pragma once
#include
#include
namespace kfusion {
namespace cuda {
/** \brief Error handler. All GPU functions from this subsystem call the
* function to report an error. For internal use only */
KF_EXPORTS void error(const char *error_string, const char *file, const int line, const char *func = "");
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief @b DeviceMemory class
*
* \note This is a BLOB container class with reference counting for GPU memory.
*
* \author Anatoly Baksheev
*/
class KF_EXPORTS DeviceMemory {
public:
/** \brief Empty constructor. */
DeviceMemory();
/** \brief Destructor. */
~DeviceMemory();
/** \brief Allocates internal buffer in GPU memory
* \param sizeBytes_arg: amount of memory to allocate
* */
DeviceMemory(size_t sizeBytes_arg);
/** \brief Initializes with user allocated buffer. Reference counting is
* disabled in this case. \param ptr_arg: pointer to buffer \param
* sizeBytes_arg: buffer size
* */
DeviceMemory(void *ptr_arg, size_t sizeBytes_arg);
/** \brief Copy constructor. Just increments reference counter. */
DeviceMemory(const DeviceMemory &other_arg);
/** \brief Assigment operator. Just increments reference counter. */
DeviceMemory &operator=(const DeviceMemory &other_arg);
/** \brief Allocates internal buffer in GPU memory. If internal buffer was
* created before the function recreates it with new size. If new and old
* sizes are equal it does nothing. \param sizeBytes_arg: buffer size
* */
void create(size_t sizeBytes_arg);
/** \brief Decrements reference counter and releases internal buffer if
* needed. */
void release();
/** \brief Performs data copying. If destination size differs it will be
* reallocated. \param other_arg: destination container
* */
void copyTo(DeviceMemory &other) const;
/** \brief Uploads data to internal buffer in GPU memory. It calls create()
* inside to ensure that intenal buffer size is enough. \param host_ptr_arg:
* pointer to buffer to upload \param sizeBytes_arg: buffer size
* */
void upload(const void *host_ptr_arg, size_t sizeBytes_arg);
/** \brief Downloads data from internal buffer to CPU memory
* \param host_ptr_arg: pointer to buffer to download
* */
void download(void *host_ptr_arg) const;
/** \brief Performs swap of data pointed with another device memory.
* \param other: device memory to swap with
* */
void swap(DeviceMemory &other_arg);
/** \brief Returns pointer for internal buffer in GPU memory. */
template
T *ptr();
/** \brief Returns constant pointer for internal buffer in GPU memory. */
template
const T *ptr() const;
/** \brief Conversion to PtrSz for passing to kernel functions. */
template
operator PtrSz() const;
/** \brief Returns true if unallocated otherwise false. */
bool empty() const;
size_t sizeBytes() const;
private:
/** \brief Device pointer. */
void *data_;
/** \brief Allocated size in bytes. */
size_t sizeBytes_;
/** \brief Pointer to reference counter in CPU memory. */
int *refcount_;
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/** \brief @b DeviceMemory2D class
*
* \note This is a BLOB container class with reference counting for pitched GPU
* memory.
*
* \author Anatoly Baksheev
*/
class KF_EXPORTS DeviceMemory2D {
public:
/** \brief Empty constructor. */
DeviceMemory2D();
/** \brief Destructor. */
~DeviceMemory2D();
/** \brief Allocates internal buffer in GPU memory
* \param rows_arg: number of rows to allocate
* \param colsBytes_arg: width of the buffer in bytes
* */
DeviceMemory2D(int rows_arg, int colsBytes_arg);
/** \brief Initializes with user allocated buffer. Reference counting is
* disabled in this case. \param rows_arg: number of rows \param
* colsBytes_arg: width of the buffer in bytes \param data_arg: pointer to
* buffer \param stepBytes_arg: stride between two consecutive rows in bytes
* */
DeviceMemory2D(int rows_arg, int colsBytes_arg, void *data_arg, size_t step_arg);
/** \brief Copy constructor. Just increments reference counter. */
DeviceMemory2D(const DeviceMemory2D &other_arg);
/** \brief Assigment operator. Just increments reference counter. */
DeviceMemory2D &operator=(const DeviceMemory2D &other_arg);
/** \brief Allocates internal buffer in GPU memory. If internal buffer was
* created before the function recreates it with new size. If new and old
* sizes are equal it does nothing. \param ptr_arg: number of rows to allocate
* \param sizeBytes_arg: width of the buffer in bytes
* */
void create(int rows_arg, int colsBytes_arg);
/** \brief Decrements reference counter and releases internal buffer if
* needed. */
void release();
/** \brief Performs data copying. If destination size differs it will be
* reallocated. \param other_arg: destination container
* */
void copyTo(DeviceMemory2D &other) const;
/** \brief Uploads data to internal buffer in GPU memory. It calls create()
* inside to ensure that intenal buffer size is enough. \param host_ptr_arg:
* pointer to host buffer to upload \param host_step_arg: stride between two
* consecutive rows in bytes for host buffer \param rows_arg: number of rows
* to upload \param sizeBytes_arg: width of host buffer in bytes
* */
void upload(const void *host_ptr_arg, size_t host_step_arg, int rows_arg, int colsBytes_arg);
/** \brief Downloads data from internal buffer to CPU memory. User is
* resposible for correct host buffer size. \param host_ptr_arg: pointer to
* host buffer to download \param host_step_arg: stride between two
* consecutive rows in bytes for host buffer
* */
void download(void *host_ptr_arg, size_t host_step_arg) const;
/** \brief Performs swap of data pointed with another device memory.
* \param other: device memory to swap with
* */
void swap(DeviceMemory2D &other_arg);
/** \brief Returns pointer to given row in internal buffer.
* \param y_arg: row index
* */
template
T *ptr(int y_arg = 0);
/** \brief Returns constant pointer to given row in internal buffer.
* \param y_arg: row index
* */
template
const T *ptr(int y_arg = 0) const;
/** \brief Conversion to PtrStep for passing to kernel functions. */
template
operator PtrStep() const;
/** \brief Conversion to PtrStepSz for passing to kernel functions. */
template
operator PtrStepSz() const;
/** \brief Returns true if unallocated otherwise false. */
bool empty() const;
/** \brief Returns number of bytes in each row. */
int colsBytes() const;
/** \brief Returns number of rows. */
int rows() const;
/** \brief Returns stride between two consecutive rows in bytes for internal
* buffer. Step is stored always and everywhere in bytes!!! */
size_t step() const;
private:
/** \brief Device pointer. */
void *data_;
/** \brief Stride between two consecutive rows in bytes for internal buffer.
* Step is stored always and everywhere in bytes!!! */
size_t step_;
/** \brief Width of the buffer in bytes. */
int colsBytes_;
/** \brief Number of rows. */
int rows_;
/** \brief Pointer to reference counter in CPU memory. */
int *refcount_;
};
} // namespace cuda
namespace device {
using kfusion::cuda::DeviceMemory;
using kfusion::cuda::DeviceMemory2D;
} // namespace device
} // namespace kfusion
///////////////////// Inline implementations of DeviceMemory
///////////////////////////////////////////////
template
inline T *kfusion::cuda::DeviceMemory::ptr() {
return (T *) data_;
}
template
inline const T *kfusion::cuda::DeviceMemory::ptr() const {
return (const T *) data_;
}
template
inline kfusion::cuda::DeviceMemory::operator kfusion::cuda::PtrSz() const {
PtrSz result;
result.data = (U *) ptr();
result.size = sizeBytes_ / sizeof(U);
return result;
}
///////////////////// Inline implementations of DeviceMemory2D
///////////////////////////////////////////////
template
T *kfusion::cuda::DeviceMemory2D::ptr(int y_arg) {
return (T *) ((char *) data_ + y_arg * step_);
}
template
const T *kfusion::cuda::DeviceMemory2D::ptr(int y_arg) const {
return (const T *) ((const char *) data_ + y_arg * step_);
}
template
kfusion::cuda::DeviceMemory2D::operator kfusion::cuda::PtrStep() const {
PtrStep result;
result.data = (U *) ptr();
result.step = step_;
return result;
}
template
kfusion::cuda::DeviceMemory2D::operator kfusion::cuda::PtrStepSz() const {
PtrStepSz result;
result.data = (U *) ptr();
result.step = step_;
result.cols = colsBytes_ / sizeof(U);
result.rows = rows_;
return result;
}
================================================
FILE: include/kfusion/cuda/imgproc.hpp
================================================
#pragma once
/* kinfu includes */
#include
/* pcl includes */
#include
namespace kfusion {
namespace cuda {
/* bilateral filter on a depth map (ksz: kernel size in pixels; sigmas control
 * the spatial and depth falloff) */
KF_EXPORTS void depthBilateralFilter(const Depth &in, Depth &out, int ksz, float sigma_spatial, float sigma_depth);
/* truncates the depth map at the given threshold */
KF_EXPORTS void depthTruncation(Depth &depth, float threshold);
/* builds the next (coarser) level of a depth pyramid from the given level */
KF_EXPORTS void depthBuildPyramid(const Depth &depth, Depth &pyramid, float sigma_depth);
/* computes per-pixel normals from depth and masks depth pixels accordingly */
KF_EXPORTS void computeNormalsAndMaskDepth(const Intr &intr, Depth &depth, Normals &normals);
/* computes a point cloud and per-point normals from a depth map */
KF_EXPORTS void computePointNormals(const Intr &intr, const Depth &depth, Cloud &points, Normals &normals);
/* converts a depth map into distances (see imgproc.cu for exact semantics) */
KF_EXPORTS void computeDists(const Depth &depth, Dists &dists, const Intr &intr);
/* downsamples a depth map together with its normals */
KF_EXPORTS void resizeDepthNormals(const Depth &depth, const Normals &normals, Depth &depth_out, Normals &normals_out);
/* downsamples a point cloud together with its normals */
KF_EXPORTS void resizePointsNormals(const Cloud &points, const Normals &normals, Cloud &points_out,
Normals &normals_out);
/* blocks until all work queued on the default CUDA stream has completed */
KF_EXPORTS void waitAllDefaultStream();
/* visualizes normals as tangent-space colors in the output image */
KF_EXPORTS void renderTangentColors(const Normals &normals, Image &image);
/** \brief rasterises the canonical surface
 * \param[in] intr camera intrinsics
 * \param[in] vol2cam volume -> camera coordinate transform
 * \param[in] s surface to rasterise
 * \param[out] vertices_out frame with the currently visible geometry
 * \param[out] normals_out frame with the normals of the currently visible geometry
 */
KF_EXPORTS void rasteriseSurface(const Intr &intr, const Affine3f &vol2cam, const Surface &s, Cloud &vertices_out,
Normals &normals_out);
/* renders a shaded image from a depth map and normals under a point light */
KF_EXPORTS void renderImage(const Depth &depth, const Normals &normals, const Intr &intr, const Vec3f &light_pose,
Image &image);
/* renders a shaded image from a point cloud and normals under a point light */
KF_EXPORTS void renderImage(const Cloud &points, const Normals &normals, const Intr &intr, const Vec3f &light_pose,
Image &image);
} // namespace cuda
} // namespace kfusion
================================================
FILE: include/kfusion/cuda/kernel_containers.hpp
================================================
#pragma once
#if defined(__CUDACC__)
#define __kf_hdevice__ __host__ __device__ __forceinline__
#define __kf_device__ __device__ __forceinline__
#else
#define __kf_hdevice__
#define __kf_device__
#endif
#include
namespace kfusion {
namespace cuda {
/** \brief Non-owning raw device pointer wrapper, usable from both host and
 * device code. Base for the kernel-argument containers below. */
template <typename T>
struct DevPtr {
    typedef T elem_type;
    const static size_t elem_size = sizeof(elem_type);

    T *data;

    __kf_hdevice__ DevPtr() : data(0) {}
    __kf_hdevice__ DevPtr(T *data_arg) : data(data_arg) {}

    __kf_hdevice__ size_t elemSize() const { return elem_size; }
    __kf_hdevice__ operator T *() { return data; }
    __kf_hdevice__ operator const T *() const { return data; }
};

/** \brief Device pointer plus element count, for passing 1-d buffers to kernels. */
template <typename T>
struct PtrSz : public DevPtr<T> {
    __kf_hdevice__ PtrSz() : size(0) {}
    __kf_hdevice__ PtrSz(T *data_arg, size_t size_arg) : DevPtr<T>(data_arg), size(size_arg) {}

    size_t size;
};

/** \brief Device pointer plus row pitch, for passing pitched 2-d buffers to kernels. */
template <typename T>
struct PtrStep : public DevPtr<T> {
    __kf_hdevice__ PtrStep() : step(0) {}
    __kf_hdevice__ PtrStep(T *data_arg, size_t step_arg) : DevPtr<T>(data_arg), step(step_arg) {}

    /** \brief stride between two consecutive rows in bytes. Step is stored
     * always and everywhere in bytes!!! */
    size_t step;

    __kf_hdevice__ T *ptr(int y = 0) { return (T *) ((char *) DevPtr<T>::data + y * step); }
    __kf_hdevice__ const T *ptr(int y = 0) const { return (const T *) ((const char *) DevPtr<T>::data + y * step); }

    __kf_hdevice__ T &operator()(int y, int x) { return ptr(y)[x]; }
    __kf_hdevice__ const T &operator()(int y, int x) const { return ptr(y)[x]; }
};

/** \brief PtrStep with explicit row/column counts. */
template <typename T>
struct PtrStepSz : public PtrStep<T> {
    __kf_hdevice__ PtrStepSz() : cols(0), rows(0) {}
    __kf_hdevice__ PtrStepSz(int rows_arg, int cols_arg, T *data_arg, size_t step_arg)
        : PtrStep<T>(data_arg, step_arg), cols(cols_arg), rows(rows_arg) {}

    int cols;
    int rows;
};
} // namespace cuda

namespace device {
using kfusion::cuda::PtrStep;
using kfusion::cuda::PtrStepSz;
using kfusion::cuda::PtrSz;
} // namespace device
} // namespace kfusion

namespace kf = kfusion;
================================================
FILE: include/kfusion/cuda/marching_cubes.hpp
================================================
#pragma once
/* eigen includes */
#include
/* kinfu includes */
#include
#include
/* pcl includes */
#include
namespace kfusion {
namespace cuda {
/** \brief MarchingCubes implements MarchingCubes functionality for TSDF volume on GPU
 * \author Anatoly Baskeheev, Itseez Ltd, (myname.mysurname@mycompany.com)
 *
 * NOTE(review): template arguments appear to have been stripped from this
 * header during extraction (e.g. boost::shared_ptr<...>, DeviceArray<...>,
 * DeviceArray2D<...> below have no type arguments). Restore them from the
 * upstream source before compiling.
 */
class KF_EXPORTS MarchingCubes {
public:
/** \brief Default size for triangles buffer (2M triangles x 3 points). */
enum { POINTS_PER_TRIANGLE = 3, DEFAULT_TRIANGLES_BUFFER_SIZE = 2 * 1000 * 1000 * POINTS_PER_TRIANGLE };
/** \brief Smart pointer. */
typedef boost::shared_ptr Ptr;
/** \brief Default constructor */
MarchingCubes();
/** \brief Destructor */
~MarchingCubes();
/* set volume pose */
void setPose(const cv::Affine3f& pose);
/* run mc to extract the zero level set; vertices/normals are written into the
 * caller-provided buffers and referenced by the returned Surface */
Surface run(const TsdfVolume& volume, DeviceArray& vertices_buffer,
DeviceArray& normals_buffer);
private:
/** \brief Edge table for marching cubes */
DeviceArray edgeTable_;
/** \brief Number of vertextes table for marching cubes */
DeviceArray numVertsTable_;
/** \brief Triangles table for marching cubes */
DeviceArray triTable_;
/** \brief Temporary buffer used by marching cubes (first row stores occuped voxes id, second number of vetexes,
 * third poits offsets */
DeviceArray2D occupied_voxels_buffer_;
/* volume pose */
cv::Affine3f pose;
};
} // namespace cuda
} // namespace kfusion
================================================
FILE: include/kfusion/cuda/projective_icp.hpp
================================================
#pragma once
#include
namespace kfusion {
namespace cuda {
/* Projective (point-to-plane style) ICP over an image pyramid, used to
 * estimate the camera pose between consecutive frames.
 *
 * NOTE(review): template arguments appear to have been stripped from this
 * header during extraction (std::vector<...>, DeviceArray2D<...>, cv::Ptr<...>
 * below have no type arguments); presumably DepthPyr = std::vector<Depth>,
 * PointsPyr = std::vector<Cloud>, NormalsPyr = std::vector<Normals>, and
 * iters_ = std::vector<int>. Restore from the upstream source. */
class ProjectiveICP {
public:
enum { MAX_PYRAMID_LEVELS = 4 };
typedef std::vector DepthPyr;
typedef std::vector PointsPyr;
typedef std::vector NormalsPyr;
ProjectiveICP();
virtual ~ProjectiveICP();
/* maximum point-pair distance accepted as a correspondence */
float getDistThreshold() const;
void setDistThreshold(float distance);
/* maximum normal angle accepted as a correspondence */
float getAngleThreshold() const;
void setAngleThreshold(float angle);
/* per-pyramid-level iteration counts */
void setIterationsNum(const std::vector &iters);
int getUsedLevelsNum() const;
virtual bool estimateTransform(Affine3f &affine, const Intr &intr, const Frame &curr, const Frame &prev);
/** The function takes masked depth, i.e. it assumes for performance reasons
 * that "if depth(y,x) is not zero, then normals(y,x) surely is not qnan" */
virtual bool estimateTransform(Affine3f &affine, const Intr &intr, const DepthPyr &dcurr, const NormalsPyr ncurr,
const DepthPyr dprev, const NormalsPyr nprev);
virtual bool estimateTransform(Affine3f &affine, const Intr &intr, const PointsPyr &vcurr, const NormalsPyr ncurr,
const PointsPyr vprev, const NormalsPyr nprev);
// static Vec3f rodrigues2(const Mat3f& matrix);
private:
/* per-level iteration counts */
std::vector iters_;
float angle_thres_;
float dist_thres_;
/* scratch buffer for the reduction of the normal equations */
DeviceArray2D buffer_;
struct StreamHelper;
cv::Ptr shelp_;
};
} // namespace cuda
} // namespace kfusion
================================================
FILE: include/kfusion/cuda/temp_utils.hpp
================================================
#pragma once
#include
#include
#define FULL_MASK 0xffffffff
namespace kfusion {
namespace device {
/* Exchanges the values of a and b. Usable from both host and device code
 * (__kf_hdevice__); T only needs to be copy-constructible and assignable. */
template <typename T>
__kf_hdevice__ void swap(T &a, T &b) {
    T c(a);
    a = b;
    b = c;
}
/* Minimal device-side replacement for std::numeric_limits (the host version
 * is not callable from device code). Only the specializations actually used
 * by the kernels are provided. */
template <typename T>
struct numeric_limits;

template <>
struct numeric_limits<float> {
    __kf_device__ static float quiet_NaN() { return __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ }
    __kf_device__ static float epsilon() { return 1.192092896e-07f /*FLT_EPSILON*/; }
    __kf_device__ static float min() { return 1.175494351e-38f /*FLT_MIN*/; }
    __kf_device__ static float max() { return 3.402823466e+38f /*FLT_MAX*/; }
};

template <>
struct numeric_limits<unsigned short> {
    __kf_device__ static unsigned short max() { return USHRT_MAX; }
};
/* Component-wise vector helpers for float3. Device-only versions use CUDA
 * fast-math / fma intrinsics; __kf_hdevice__ versions are also callable from
 * host code. */
/* dot product computed with fused multiply-adds (round-to-nearest) */
__kf_device__ float dot(const float3 &v1, const float3 &v2) {
return __fmaf_rn(v1.x, v2.x, __fmaf_rn(v1.y, v2.y, v1.z * v2.z));
}
/* adds scalar v to every component */
__kf_device__ float3 &operator+=(float3 &vec, const float &v) {
vec.x += v;
vec.y += v;
vec.z += v;
return vec;
}
/* component-wise in-place addition */
__kf_device__ float3 &operator+=(float3 &v1, const float3 &v2) {
v1.x += v2.x;
v1.y += v2.y;
v1.z += v2.z;
return v1;
}
/* component-wise addition */
__kf_device__ float3 operator+(const float3 &v1, const float3 &v2) {
return make_float3(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z);
}
/* component-wise (Hadamard) product -- NOT a dot or cross product */
__kf_device__ float3 operator*(const float3 &v1, const float3 &v2) {
return make_float3(v1.x * v2.x, v1.y * v2.y, v1.z * v2.z);
}
/* component-wise product with an int3 */
__kf_hdevice__ float3 operator*(const float3 &v1, const int3 &v2) {
return make_float3(v1.x * v2.x, v1.y * v2.y, v1.z * v2.z);
}
/* component-wise division */
__kf_device__ float3 operator/(const float3 &v1, const float3 &v2) {
return make_float3(v1.x / v2.x, v1.y / v2.y, v1.z / v2.z);
}
/* scalar divided by each component */
__kf_hdevice__ float3 operator/(const float &v, const float3 &vec) {
return make_float3(v / vec.x, v / vec.y, v / vec.z);
}
/* in-place scaling by a scalar */
__kf_device__ float3 &operator*=(float3 &vec, const float &v) {
vec.x *= v;
vec.y *= v;
vec.z *= v;
return vec;
}
/* component-wise subtraction */
__kf_hdevice__ float3 operator-(const float3 &v1, const float3 &v2) {
return make_float3(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z);
}
/* scaling by a scalar (both operand orders) */
__kf_hdevice__ float3 operator*(const float3 &v1, const float &v) { return make_float3(v1.x * v, v1.y * v, v1.z * v); }
__kf_hdevice__ float3 operator*(const float &v, const float3 &v1) { return make_float3(v1.x * v, v1.y * v, v1.z * v); }
/* Euclidean norm and squared norm */
__kf_device__ float norm(const float3 &v) { return sqrt(dot(v, v)); }
__kf_device__ float norm_sqr(const float3 &v) { return dot(v, v); }
/* unit vector via reciprocal square root; undefined for the zero vector */
__kf_device__ float3 normalized(const float3 &v) { return v * rsqrt(dot(v, v)); }
/* right-handed cross product */
__kf_hdevice__ float3 cross(const float3 &v1, const float3 &v2) {
return make_float3(v1.y * v2.z - v1.z * v2.y, v1.z * v2.x - v1.x * v2.z, v1.x * v2.y - v1.y * v2.x);
}
/* Solves the quadratic x^2 - b*x + c = 0 and stores the two roots in
 * roots.y (smaller) and roots.z (larger); roots.x is set to 0, serving as the
 * third eigenvalue when one root of the cubic is zero. A negative
 * discriminant is clamped to zero rather than producing NaNs. */
__kf_device__ void computeRoots2(const float &b, const float &c, float3 &roots) {
roots.x = 0.f;
float d = b * b - 4.f * c;
if (d < 0.f) // no real roots!!!! THIS SHOULD NOT HAPPEN!
d = 0.f;
float sd = sqrtf(d);
roots.z = 0.5f * (b + sd);
roots.y = 0.5f * (b - sd);
}
/* Computes the three real roots of the monic cubic
 *     x^3 - c2*x^2 + c1*x - c0 = 0
 * (the characteristic polynomial of a symmetric 3x3 matrix, so all roots are
 * real) and stores them in `roots` in increasing order
 * (roots.x <= roots.y <= roots.z). Closed-form trigonometric solution. */
__kf_device__ void computeRoots3(float c0, float c1, float c2, float3 &roots) {
    if (fabsf(c0) < numeric_limits<float>::epsilon()) // one root is 0 -> quadratic equation
    {
        computeRoots2(c2, c1, roots);
    } else {
        const float s_inv3 = 1.f / 3.f;
        const float s_sqrt3 = sqrtf(3.f);
        // Construct the parameters used in classifying the roots of the equation
        // and in solving the equation for the roots in closed form.
        float c2_over_3 = c2 * s_inv3;
        float a_over_3 = (c1 - c2 * c2_over_3) * s_inv3;
        if (a_over_3 > 0.f)
            a_over_3 = 0.f;
        float half_b = 0.5f * (c0 + c2_over_3 * (2.f * c2_over_3 * c2_over_3 - c1));
        float q = half_b * half_b + a_over_3 * a_over_3 * a_over_3;
        if (q > 0.f)
            q = 0.f;
        // Compute the eigenvalues by solving for the roots of the polynomial.
        float rho = sqrtf(-a_over_3);
        float theta = atan2f(sqrtf(-q), half_b) * s_inv3;
        // Fast-math intrinsics: reduced precision, acceptable for eigenvalue
        // classification here.
        float cos_theta = __cosf(theta);
        float sin_theta = __sinf(theta);
        roots.x = c2_over_3 + 2.f * rho * cos_theta;
        roots.y = c2_over_3 - rho * (cos_theta + s_sqrt3 * sin_theta);
        roots.z = c2_over_3 - rho * (cos_theta - s_sqrt3 * sin_theta);
        // Sort in increasing order.
        if (roots.x >= roots.y)
            swap(roots.x, roots.y);
        if (roots.y >= roots.z) {
            swap(roots.y, roots.z);
            if (roots.x >= roots.y)
                swap(roots.x, roots.y);
        }
        if (roots.x <= 0) // eigenval for symetric positive semi-definite matrix can
                          // not be negative! Set it to 0
            computeRoots2(c2, c1, roots);
    }
}
/** \brief Closed-form eigen-decomposition of a symmetric 3x3 matrix (device code).
 *
 * The matrix is supplied packed as its upper triangle:
 *     mat_pkg = { m00, m01, m02, m11, m12, m22 }.
 * compute() returns the eigenvalues in ascending order in evals and the
 * corresponding unit eigenvectors as the rows of evecs.
 * WARNING: compute() rescales mat_pkg in place, so the packed input is
 * clobbered by the call. */
struct Eigen33 {
public:
    /* Tiny fixed-row-count matrix of float3 rows; Mat33/Mat43 below are the
     * 3- and 4-row instantiations. */
    template <int Rows>
    struct MiniMat {
        float3 data[Rows];
        __kf_hdevice__ float3 &operator[](int i) { return data[i]; }
        __kf_hdevice__ const float3 &operator[](int i) const { return data[i]; }
    };
    typedef MiniMat<3> Mat33;
    typedef MiniMat<4> Mat43;
    /* Returns a unit vector orthogonal to src; src is assumed non-zero. */
    static __kf_device__ float3 unitOrthogonal(const float3 &src) {
        float3 perp;
        /* Let us compute the crossed product of *this with a vector
         * that is not too close to being colinear to *this.
         */
        /* unless the x and y coords are both close to zero, we can
         * simply take ( -y, x, 0 ) and normalize it.
         */
        if (!isMuchSmallerThan(src.x, src.z) || !isMuchSmallerThan(src.y, src.z)) {
            float invnm = rsqrtf(src.x * src.x + src.y * src.y);
            perp.x = -src.y * invnm;
            perp.y = src.x * invnm;
            perp.z = 0.0f;
        }
        /* if both x and y are close to zero, then the vector is close
         * to the z-axis, so it's far from colinear to the x-axis for instance.
         * So we take the crossed product with (1,0,0) and normalize it.
         */
        else {
            float invnm = rsqrtf(src.z * src.z + src.y * src.y);
            perp.x = 0.0f;
            perp.y = -src.z * invnm;
            perp.z = src.y * invnm;
        }
        return perp;
    }
    __kf_device__ Eigen33(volatile float *mat_pkg_arg) : mat_pkg(mat_pkg_arg) {}
    /* Computes eigenvalues (evals, ascending) and eigenvectors (rows of evecs).
     * tmp and vec_tmp are caller-provided scratch matrices. */
    __kf_device__ void compute(Mat33 &tmp, Mat33 &vec_tmp, Mat33 &evecs, float3 &evals) {
        // Scale the matrix so its entries are in [-1,1]. The scaling is applied
        // only when at least one matrix entry has magnitude larger than 1.
        float max01 = fmaxf(fabsf(mat_pkg[0]), fabsf(mat_pkg[1]));
        float max23 = fmaxf(fabsf(mat_pkg[2]), fabsf(mat_pkg[3]));
        float max45 = fmaxf(fabsf(mat_pkg[4]), fabsf(mat_pkg[5]));
        float m0123 = fmaxf(max01, max23);
        float scale = fmaxf(max45, m0123);
        if (scale <= numeric_limits<float>::min())
            scale = 1.f;
        mat_pkg[0] /= scale;
        mat_pkg[1] /= scale;
        mat_pkg[2] /= scale;
        mat_pkg[3] /= scale;
        mat_pkg[4] /= scale;
        mat_pkg[5] /= scale;
        // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The
        // eigenvalues are the roots to this equation, all guaranteed to be
        // real-valued, because the matrix is symmetric.
        float c0 = m00() * m11() * m22() + 2.f * m01() * m02() * m12() - m00() * m12() * m12() - m11() * m02() * m02() -
                   m22() * m01() * m01();
        float c1 = m00() * m11() - m01() * m01() + m00() * m22() - m02() * m02() + m11() * m22() - m12() * m12();
        float c2 = m00() + m11() + m22();
        computeRoots3(c0, c1, c2, evals);
        if (evals.z - evals.x <= numeric_limits<float>::epsilon()) {
            // All three eigenvalues (numerically) equal: any basis works.
            evecs[0] = make_float3(1.f, 0.f, 0.f);
            evecs[1] = make_float3(0.f, 1.f, 0.f);
            evecs[2] = make_float3(0.f, 0.f, 1.f);
        } else if (evals.y - evals.x <= numeric_limits<float>::epsilon()) {
            // first and second equal
            tmp[0] = row0();
            tmp[1] = row1();
            tmp[2] = row2();
            tmp[0].x -= evals.z;
            tmp[1].y -= evals.z;
            tmp[2].z -= evals.z;
            // Eigenvector of the distinct eigenvalue = longest cross product
            // of two rows of (A - lambda*I).
            vec_tmp[0] = cross(tmp[0], tmp[1]);
            vec_tmp[1] = cross(tmp[0], tmp[2]);
            vec_tmp[2] = cross(tmp[1], tmp[2]);
            float len1 = dot(vec_tmp[0], vec_tmp[0]);
            float len2 = dot(vec_tmp[1], vec_tmp[1]);
            float len3 = dot(vec_tmp[2], vec_tmp[2]);
            if (len1 >= len2 && len1 >= len3) {
                evecs[2] = vec_tmp[0] * rsqrtf(len1);
            } else if (len2 >= len1 && len2 >= len3) {
                evecs[2] = vec_tmp[1] * rsqrtf(len2);
            } else {
                evecs[2] = vec_tmp[2] * rsqrtf(len3);
            }
            evecs[1] = unitOrthogonal(evecs[2]);
            evecs[0] = cross(evecs[1], evecs[2]);
        } else if (evals.z - evals.y <= numeric_limits<float>::epsilon()) {
            // second and third equal
            tmp[0] = row0();
            tmp[1] = row1();
            tmp[2] = row2();
            tmp[0].x -= evals.x;
            tmp[1].y -= evals.x;
            tmp[2].z -= evals.x;
            vec_tmp[0] = cross(tmp[0], tmp[1]);
            vec_tmp[1] = cross(tmp[0], tmp[2]);
            vec_tmp[2] = cross(tmp[1], tmp[2]);
            float len1 = dot(vec_tmp[0], vec_tmp[0]);
            float len2 = dot(vec_tmp[1], vec_tmp[1]);
            float len3 = dot(vec_tmp[2], vec_tmp[2]);
            if (len1 >= len2 && len1 >= len3) {
                evecs[0] = vec_tmp[0] * rsqrtf(len1);
            } else if (len2 >= len1 && len2 >= len3) {
                evecs[0] = vec_tmp[1] * rsqrtf(len2);
            } else {
                evecs[0] = vec_tmp[2] * rsqrtf(len3);
            }
            evecs[1] = unitOrthogonal(evecs[0]);
            evecs[2] = cross(evecs[0], evecs[1]);
        } else {
            // All eigenvalues distinct: solve for each eigenvector in turn,
            // tracking which solve was numerically best/worst (mmax) so the
            // two weakest directions can be re-orthogonalised at the end.
            tmp[0] = row0();
            tmp[1] = row1();
            tmp[2] = row2();
            tmp[0].x -= evals.z;
            tmp[1].y -= evals.z;
            tmp[2].z -= evals.z;
            vec_tmp[0] = cross(tmp[0], tmp[1]);
            vec_tmp[1] = cross(tmp[0], tmp[2]);
            vec_tmp[2] = cross(tmp[1], tmp[2]);
            float len1 = dot(vec_tmp[0], vec_tmp[0]);
            float len2 = dot(vec_tmp[1], vec_tmp[1]);
            float len3 = dot(vec_tmp[2], vec_tmp[2]);
            float mmax[3];
            unsigned int min_el = 2;
            unsigned int max_el = 2;
            if (len1 >= len2 && len1 >= len3) {
                mmax[2] = len1;
                evecs[2] = vec_tmp[0] * rsqrtf(len1);
            } else if (len2 >= len1 && len2 >= len3) {
                mmax[2] = len2;
                evecs[2] = vec_tmp[1] * rsqrtf(len2);
            } else {
                mmax[2] = len3;
                evecs[2] = vec_tmp[2] * rsqrtf(len3);
            }
            tmp[0] = row0();
            tmp[1] = row1();
            tmp[2] = row2();
            tmp[0].x -= evals.y;
            tmp[1].y -= evals.y;
            tmp[2].z -= evals.y;
            vec_tmp[0] = cross(tmp[0], tmp[1]);
            vec_tmp[1] = cross(tmp[0], tmp[2]);
            vec_tmp[2] = cross(tmp[1], tmp[2]);
            len1 = dot(vec_tmp[0], vec_tmp[0]);
            len2 = dot(vec_tmp[1], vec_tmp[1]);
            len3 = dot(vec_tmp[2], vec_tmp[2]);
            if (len1 >= len2 && len1 >= len3) {
                mmax[1] = len1;
                evecs[1] = vec_tmp[0] * rsqrtf(len1);
                min_el = len1 <= mmax[min_el] ? 1 : min_el;
                max_el = len1 > mmax[max_el] ? 1 : max_el;
            } else if (len2 >= len1 && len2 >= len3) {
                mmax[1] = len2;
                evecs[1] = vec_tmp[1] * rsqrtf(len2);
                min_el = len2 <= mmax[min_el] ? 1 : min_el;
                max_el = len2 > mmax[max_el] ? 1 : max_el;
            } else {
                mmax[1] = len3;
                evecs[1] = vec_tmp[2] * rsqrtf(len3);
                min_el = len3 <= mmax[min_el] ? 1 : min_el;
                max_el = len3 > mmax[max_el] ? 1 : max_el;
            }
            tmp[0] = row0();
            tmp[1] = row1();
            tmp[2] = row2();
            tmp[0].x -= evals.x;
            tmp[1].y -= evals.x;
            tmp[2].z -= evals.x;
            vec_tmp[0] = cross(tmp[0], tmp[1]);
            vec_tmp[1] = cross(tmp[0], tmp[2]);
            vec_tmp[2] = cross(tmp[1], tmp[2]);
            len1 = dot(vec_tmp[0], vec_tmp[0]);
            len2 = dot(vec_tmp[1], vec_tmp[1]);
            len3 = dot(vec_tmp[2], vec_tmp[2]);
            // BUGFIX: the first two branches previously compared `len3` when
            // updating min_el/max_el; they must compare the length selected in
            // the branch (len1/len2), mirroring the evals.y section above.
            if (len1 >= len2 && len1 >= len3) {
                mmax[0] = len1;
                evecs[0] = vec_tmp[0] * rsqrtf(len1);
                min_el = len1 <= mmax[min_el] ? 0 : min_el;
                max_el = len1 > mmax[max_el] ? 0 : max_el;
            } else if (len2 >= len1 && len2 >= len3) {
                mmax[0] = len2;
                evecs[0] = vec_tmp[1] * rsqrtf(len2);
                min_el = len2 <= mmax[min_el] ? 0 : min_el;
                max_el = len2 > mmax[max_el] ? 0 : max_el;
            } else {
                mmax[0] = len3;
                evecs[0] = vec_tmp[2] * rsqrtf(len3);
                min_el = len3 <= mmax[min_el] ? 0 : min_el;
                max_el = len3 > mmax[max_el] ? 0 : max_el;
            }
            // Rebuild the two least reliable eigenvectors as cross products of
            // the others so the basis is orthonormal.
            unsigned mid_el = 3 - min_el - max_el;
            evecs[min_el] = normalized(cross(evecs[(min_el + 1) % 3], evecs[(min_el + 2) % 3]));
            evecs[mid_el] = normalized(cross(evecs[(mid_el + 1) % 3], evecs[(mid_el + 2) % 3]));
        }
        // Rescale back to the original size.
        evals *= scale;
    }
private:
    volatile float *mat_pkg; // packed upper triangle: m00 m01 m02 m11 m12 m22
    __kf_device__ float m00() const { return mat_pkg[0]; }
    __kf_device__ float m01() const { return mat_pkg[1]; }
    __kf_device__ float m02() const { return mat_pkg[2]; }
    __kf_device__ float m10() const { return mat_pkg[1]; }
    __kf_device__ float m11() const { return mat_pkg[3]; }
    __kf_device__ float m12() const { return mat_pkg[4]; }
    __kf_device__ float m20() const { return mat_pkg[2]; }
    __kf_device__ float m21() const { return mat_pkg[4]; }
    __kf_device__ float m22() const { return mat_pkg[5]; }
    __kf_device__ float3 row0() const { return make_float3(m00(), m01(), m02()); }
    __kf_device__ float3 row1() const { return make_float3(m10(), m11(), m12()); }
    __kf_device__ float3 row2() const { return make_float3(m20(), m21(), m22()); }
    /* True when |x| is negligible relative to |y| (Eigen's isMuchSmallerThan). */
    __kf_device__ static bool isMuchSmallerThan(float x, float y) {
        // copied from /include/Eigen/src/Core/NumTraits.h
        const float prec_sqr = numeric_limits<float>::epsilon() * numeric_limits<float>::epsilon();
        return x * x <= prec_sqr * y * y;
    }
};
/* Warp-level helpers (WARP_SIZE = 32 lanes). */
struct Warp {
    enum { LOG_WARP_SIZE = 5, WARP_SIZE = 1 << LOG_WARP_SIZE, STRIDE = WARP_SIZE };
    /** \brief Returns the warp lane ID of the calling thread. */
    static __kf_device__ unsigned int laneId() {
        unsigned int ret;
        asm("mov.u32 %0, %laneid;" : "=r"(ret));
        return ret;
    }
    /** \brief Index of the calling thread's warp within its block (flattened
     * 3D thread id divided by the warp size). */
    static __kf_device__ unsigned int id() {
        int tid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
        return tid >> LOG_WARP_SIZE;
    }
    /** \brief Bit mask with a 1 for every lane id lower than the calling
     * lane's (PTX %lanemask_lt). NOTE(review): declared int while the PTX
     * register is unsigned; callers only feed the result to __popc, where the
     * implicit conversion is harmless. */
    static __kf_device__ int laneMaskLt() {
#if (__CUDA_ARCH__ >= 200)
        unsigned int ret;
        asm("mov.u32 %0, %lanemask_lt;" : "=r"(ret));
        return ret;
#else
        return 0xFFFFFFFF >> (32 - laneId());
#endif
    }
    /** \brief Rank of the calling lane among the lanes whose ballot bit is
     * set, i.e. an exclusive scan of the ballot mask below this lane. */
    static __kf_device__ int binaryExclScan(int ballot_mask) { return __popc(Warp::laneMaskLt() & ballot_mask); }
};
/* Block-wide helpers: flattened thread indexing and shared-memory tree
 * reductions. CTA_SIZE (a template parameter whose list was lost in this copy
 * of the source) must equal the number of threads per block and be a power of
 * two; `buffer` must point to shared memory with one slot per thread.
 * NOTE(review): the tid < 32 tail performs no __syncthreads()/__syncwarp()
 * between steps, i.e. it assumes implicit warp-synchronous execution. That
 * assumption is not guaranteed under independent thread scheduling (Volta and
 * newer) — confirm the target compute capabilities. */
struct Block {
    /* Total number of threads in the block. */
    static __kf_device__ unsigned int stride() { return blockDim.x * blockDim.y * blockDim.z; }
    /* Linear index of the calling thread within its (up to 3D) block. */
    static __kf_device__ int flattenedThreadId() {
        return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
    }
    /* In-place tree reduction of buffer[0 .. CTA_SIZE) with binary op `op`;
     * the result is left in buffer[0]. */
    template
    static __kf_device__ void reduce(volatile T *buffer, BinOp op) {
        int tid = flattenedThreadId();
        T val = buffer[tid];
        if (CTA_SIZE >= 1024) {
            if (tid < 512)
                buffer[tid] = val = op(val, buffer[tid + 512]);
            __syncthreads();
        }
        if (CTA_SIZE >= 512) {
            if (tid < 256)
                buffer[tid] = val = op(val, buffer[tid + 256]);
            __syncthreads();
        }
        if (CTA_SIZE >= 256) {
            if (tid < 128)
                buffer[tid] = val = op(val, buffer[tid + 128]);
            __syncthreads();
        }
        if (CTA_SIZE >= 128) {
            if (tid < 64)
                buffer[tid] = val = op(val, buffer[tid + 64]);
            __syncthreads();
        }
        // Final warp: barrier-free steps (see NOTE(review) on the struct).
        if (tid < 32) {
            if (CTA_SIZE >= 64) {
                buffer[tid] = val = op(val, buffer[tid + 32]);
            }
            if (CTA_SIZE >= 32) {
                buffer[tid] = val = op(val, buffer[tid + 16]);
            }
            if (CTA_SIZE >= 16) {
                buffer[tid] = val = op(val, buffer[tid + 8]);
            }
            if (CTA_SIZE >= 8) {
                buffer[tid] = val = op(val, buffer[tid + 4]);
            }
            if (CTA_SIZE >= 4) {
                buffer[tid] = val = op(val, buffer[tid + 2]);
            }
            if (CTA_SIZE >= 2) {
                buffer[tid] = val = op(val, buffer[tid + 1]);
            }
        }
    }
    /* Same reduction, but each thread first stores `init` into its own slot;
     * the reduced value is read back from buffer[0] (after a final barrier)
     * and returned to every thread. */
    template
    static __kf_device__ T reduce(volatile T *buffer, T init, BinOp op) {
        int tid = flattenedThreadId();
        T val = buffer[tid] = init;
        __syncthreads();
        if (CTA_SIZE >= 1024) {
            if (tid < 512)
                buffer[tid] = val = op(val, buffer[tid + 512]);
            __syncthreads();
        }
        if (CTA_SIZE >= 512) {
            if (tid < 256)
                buffer[tid] = val = op(val, buffer[tid + 256]);
            __syncthreads();
        }
        if (CTA_SIZE >= 256) {
            if (tid < 128)
                buffer[tid] = val = op(val, buffer[tid + 128]);
            __syncthreads();
        }
        if (CTA_SIZE >= 128) {
            if (tid < 64)
                buffer[tid] = val = op(val, buffer[tid + 64]);
            __syncthreads();
        }
        if (tid < 32) {
// The shuffle-based variant below is compiled out by the trailing `&& 0`;
// only the shared-memory path is active.
#if defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 300 && 0
            if (CTA_SIZE >= 64)
                val = op(val, buffer[tid + 32]);
            if (CTA_SIZE >= 32)
                val = op(val, __shfl_xor(val, 16));
            if (CTA_SIZE >= 16)
                val = op(val, __shfl_xor(val, 8));
            if (CTA_SIZE >= 8)
                val = op(val, __shfl_xor(val, 4));
            if (CTA_SIZE >= 4)
                val = op(val, __shfl_xor(val, 2));
            if (CTA_SIZE >= 2)
                buffer[tid] = op(val, __shfl_xor(val, 1));
#else
            if (CTA_SIZE >= 64) {
                buffer[tid] = val = op(val, buffer[tid + 32]);
            }
            if (CTA_SIZE >= 32) {
                buffer[tid] = val = op(val, buffer[tid + 16]);
            }
            if (CTA_SIZE >= 16) {
                buffer[tid] = val = op(val, buffer[tid + 8]);
            }
            if (CTA_SIZE >= 8) {
                buffer[tid] = val = op(val, buffer[tid + 4]);
            }
            if (CTA_SIZE >= 4) {
                buffer[tid] = val = op(val, buffer[tid + 2]);
            }
            if (CTA_SIZE >= 2) {
                buffer[tid] = val = op(val, buffer[tid + 1]);
            }
#endif
        }
        __syncthreads();
        return buffer[0];
    }
};
/* Warp-vote helpers with software fallbacks for pre-Fermi GPUs
 * (__CUDA_ARCH__ < 200); on newer hardware they map to the *_sync vote
 * intrinsics. FULL_MASK (all 32 lanes) is defined elsewhere in this header. */
struct Emulation {
    /* Software warp sum: reduces the 32 warp-local entries of `ptr` and
     * returns the total read from the warp's first slot.
     * NOTE(review): relies on implicit warp-synchronous execution (no
     * __syncwarp between steps) — only used on the pre-Fermi path. */
    static __kf_device__ int warp_reduce(volatile int *ptr, const unsigned int tid) {
        const unsigned int lane = tid & 31; // index of thread in warp (0..31)
        if (lane < 16) {
            int partial = ptr[tid];
            ptr[tid] = partial = partial + ptr[tid + 16];
            ptr[tid] = partial = partial + ptr[tid + 8];
            ptr[tid] = partial = partial + ptr[tid + 4];
            ptr[tid] = partial = partial + ptr[tid + 2];
            ptr[tid] = partial = partial + ptr[tid + 1];
        }
        return ptr[tid - lane];
    }
    /* Warp ballot: bit i of the result is set iff lane i's predicate was
     * non-zero. cta_buffer is scratch for the emulated path only. */
    static __kf_device__ int Ballot(int predicate, volatile int *cta_buffer) {
#if __CUDA_ARCH__ >= 200
        (void) cta_buffer;
        return __ballot_sync(FULL_MASK, predicate);
#else
        int tid = Block::flattenedThreadId();
        cta_buffer[tid] = predicate ? (1 << (tid & 31)) : 0;
        return warp_reduce(cta_buffer, tid);
#endif
    }
    /* True iff the predicate is non-zero on every lane of the warp. */
    static __kf_device__ bool All(int predicate, volatile int *cta_buffer) {
#if __CUDA_ARCH__ >= 200
        (void) cta_buffer;
        return __all_sync(FULL_MASK, predicate);
#else
        int tid = Block::flattenedThreadId();
        cta_buffer[tid] = predicate ? 1 : 0;
        return warp_reduce(cta_buffer, tid) == 32;
#endif
    }
};
} // namespace device
} // namespace kfusion
================================================
FILE: include/kfusion/cuda/texture_binder.hpp
================================================
#pragma once
#include
#include
namespace kfusion {
namespace cuda {
/* RAII binder for the legacy CUDA texture-reference API: each constructor
 * binds a device buffer to a texture reference and the destructor unbinds it
 * when the binder leaves scope. Template parameter lists were lost in this
 * copy of the source.
 * NOTE(review): texture references (cudaBindTexture*, cudaUnbindTexture) are
 * a legacy API removed in recent CUDA toolkits — confirm the toolkit version
 * this builds against. */
class TextureBinder {
public:
    /* Binds a pitched 2D device array to a 2D texture reference. */
    template
    TextureBinder(const DeviceArray2D &arr, const struct texture &tex) : texref(&tex) {
        cudaChannelFormatDesc desc = cudaCreateChannelDesc();
        cudaSafeCall(cudaBindTexture2D(0, tex, arr.ptr(), desc, arr.cols(), arr.rows(), arr.step()));
    }
    /* Binds a linear device array to a 1D texture reference. */
    template
    TextureBinder(const DeviceArray &arr, const struct texture &tex) : texref(&tex) {
        cudaChannelFormatDesc desc = cudaCreateChannelDesc();
        cudaSafeCall(cudaBindTexture(0, tex, arr.ptr(), desc, arr.sizeBytes()));
    }
    /* Binds a raw pitched pointer (PtrStepSz) to a 2D texture reference. */
    template
    TextureBinder(const PtrStepSz &arr, const struct texture &tex) : texref(&tex) {
        cudaChannelFormatDesc desc = cudaCreateChannelDesc();
        cudaSafeCall(cudaBindTexture2D(0, tex, arr.data, desc, arr.cols, arr.rows, arr.step));
    }
    /* 2D bind with a caller-supplied channel format descriptor. */
    template
    TextureBinder(const A &arr, const struct texture &tex, const cudaChannelFormatDesc &desc)
        : texref(&tex) {
        cudaSafeCall(cudaBindTexture2D(0, tex, arr.data, desc, arr.cols, arr.rows, arr.step));
    }
    /* Binds a raw linear pointer (PtrSz) to a 1D texture reference. */
    template
    TextureBinder(const PtrSz &arr, const struct texture &tex) : texref(&tex) {
        cudaChannelFormatDesc desc = cudaCreateChannelDesc();
        cudaSafeCall(cudaBindTexture(0, tex, arr.data, desc, arr.size * arr.elemSize()));
    }
    /* Unbinds whichever texture reference the constructor bound. */
    ~TextureBinder() { cudaSafeCall(cudaUnbindTexture(texref)); }
private:
    const struct textureReference *texref; // bound texture, unbound on destruction
};
} // namespace cuda
namespace device {
using kfusion::cuda::TextureBinder;
}
} // namespace kfusion
================================================
FILE: include/kfusion/cuda/tsdf_volume.hpp
================================================
#pragma once
/* kinfu includes */
#include
/* pcl includes */
#include
/* sobfu includes */
#include
/* sys headers */
#include
namespace kfusion {
namespace cuda {
/** \brief Host-side handle to a TSDF (truncated signed distance function)
 * voxel volume stored on the GPU.
 *
 * Owns the device buffer plus the volume metadata (resolution, metric size,
 * pose, truncation distance, ...). The init* methods fill the volume with an
 * analytic signed-distance shape; integrate() fuses either another volume or
 * a depth frame into it.
 * (Fix: mis-encoded `&` reference tokens in two declarations restored.) */
class KF_EXPORTS TsdfVolume {
public:
    /* Allocates and configures the volume from the application parameters. */
    TsdfVolume(const Params &params);
    virtual ~TsdfVolume();
    /* Allocates the device buffer for a volume of `dims` voxels. */
    void create(const Vec3i &dims);
    /* --- geometry / fusion-parameter accessors --- */
    Vec3i getDims() const;
    Vec3f getVoxelSize() const;
    const CudaData data() const;
    CudaData data();
    Vec3f getSize() const;
    void setSize(const Vec3f &size);
    float getTruncDist() const;
    void setTruncDist(float &distance);
    float getEta() const;
    void setEta(float &eta);
    float getMaxWeight() const;
    void setMaxWeight(float &weight);
    Affine3f getPose() const;
    void setPose(const Affine3f &pose);
    float getRaycastStepFactor() const;
    void setRaycastStepFactor(float &factor);
    float getGradientDeltaFactor() const;
    void setGradientDeltaFactor(float &factor);
    Vec3i getGridOrigin() const;
    void setGridOrigin(const Vec3i &origin);
    /* Resets every voxel to the empty state. */
    virtual void clear();
    /* Swaps the underlying device buffer with `data`. */
    void swap(CudaData &data);
    /* Applies a rigid transform to the volume. */
    virtual void applyAffine(const Affine3f &affine);
    /* Fuses another (warped) volume into this one. */
    virtual void integrate(const TsdfVolume &phi_n_psi);
    /* Fuses a depth frame taken from `camera_pose` with intrinsics `intr`. */
    virtual void integrate(const Dists &dists, const Affine3f &camera_pose, const Intr &intr);
    /* Analytic SDF initialisers: box half-extents, ellipsoid radii, plane
     * height, sphere centre/radius, torus parameters. */
    virtual void initBox(const float3 &b);
    virtual void initEllipsoid(const float3 &r);
    virtual void initPlane(const float &z);
    virtual void initSphere(const float3 &centre, const float &radius);
    virtual void initTorus(const float2 &t);
    /* Debug helper: prints the volume's SDF values. */
    void print_sdf_values();
    /* One packed voxel: half-precision TSDF value plus integer weight, with
     * half <-> float conversion helpers. */
    struct Entry {
        typedef unsigned short half;
        half tsdf;
        int weight;
        static float half2float(half value);
        static half float2half(float value);
    };
private:
    CudaData data_;               // raw voxel buffer on the device
    float trunc_dist_;            // TSDF truncation distance
    float eta_;                   // expansion band parameter (see setEta; units — confirm in tsdf_volume.cpp)
    float max_weight_;            // cap on the per-voxel integration weight
    Vec3i dims_;                  // resolution in voxels
    Vec3f size_;                  // metric extent
    Affine3f pose_;               // volume pose
    float gradient_delta_factor_; // gradient sampling step, in voxel sizes
    float raycast_step_factor_;   // raycast step, in voxel sizes
};
} // namespace cuda
} // namespace kfusion
================================================
FILE: include/kfusion/exports.hpp
================================================
#pragma once
/* KF_EXPORTS marks the public kfusion API: it expands to
 * __declspec(dllexport) when building the Windows DLL (KFUSION_API_EXPORTS
 * defined) and to nothing on every other platform/configuration. */
#if (defined WIN32 || defined _WIN32 || defined WINCE) && defined KFUSION_API_EXPORTS
#define KF_EXPORTS __declspec(dllexport)
#else
#define KF_EXPORTS
#endif
================================================
FILE: include/kfusion/internal.hpp
================================================
#pragma once
/* kinfu includes */
#include
#include
/* pcl includes */
#include
//#define USE_DEPTH
/* 4x4 float matrix stored as an array of four float4 (POD, safe to pass to
 * device code). Row- vs column-major interpretation is up to the use site. */
struct Mat4f {
    float4 data[4];
};
namespace kfusion {
namespace device {
/* GPU-side type aliases used throughout the kfusion device code. Template
 * argument lists of the container aliases were lost in this copy of the
 * source (PtrStepSz/DeviceArray2D/DeviceArray element types — confirm against
 * the repository). */
typedef float4 Normal;
typedef float4 Point;
typedef unsigned short ushort;
typedef unsigned char uchar;
typedef PtrStepSz Dists;
typedef DeviceArray2D Depth;
typedef DeviceArray2D Normals;
typedef DeviceArray2D Points;
typedef DeviceArray2D Image;
typedef float3 *Displacements;
typedef DeviceArray Vertices;
typedef Vertices Norms;
/* Triangle mesh extracted from the TSDF, held as flat device arrays of
 * vertices and per-vertex normals. */
struct Surface {
    Vertices vertices;
    Norms normals;
};
/* Eight-float POD; layout only, semantics depend on the use site. */
struct float8 {
    float x, y, z, w, c1, c2, c3, c4;
};
typedef float4 PointType;
typedef int3 Vec3i;
typedef float3 Vec3f;
/* 3x3 float matrix stored as three float3 entries. */
struct Mat3f {
    float3 data[3];
};
/* Rigid/affine transform: 3x3 linear part R plus translation t. */
struct Aff3f {
    Mat3f R;
    Vec3f t;
};
/* Device-side (POD) view of the TSDF volume: raw buffer plus the constant
 * metadata kernels need. One float2 is stored per voxel — presumably
 * (tsdf, weight); confirm against tsdf_volume.cu. */
struct TsdfVolume {
public:
    float2 *const data;      // voxel buffer in global memory
    const int3 dims;         // resolution in voxels per axis
    const float3 voxel_size; // metric size of one voxel
    const float trunc_dist;  // TSDF truncation distance
    const float eta;         // expansion band parameter (see host TsdfVolume)
    const float max_weight;  // cap on the per-voxel integration weight
    TsdfVolume(float2 *const data, int3 dims, float3 voxel_size, float trunc_dist, float eta, float max_weight);
    // TsdfVolume(const TsdfVolume &);
    TsdfVolume &operator=(const TsdfVolume &);
    /* Voxel accessors: (x, y, z) element, start of an (x, y) z-column, and
     * step from one voxel to the next along z. */
    __kf_device__ float2 *operator()(int x, int y, int z);
    __kf_device__ const float2 *operator()(int x, int y, int z) const;
    __kf_device__ float2 *beg(int x, int y) const;
    __kf_device__ float2 *zstep(float2 *const ptr) const;
};
/* Pinhole projection: maps a camera-space 3D point to pixel coordinates.
 * f = (fx, fy) focal lengths, c = (cx, cy) principal point. */
struct Projector {
    float2 f, c;
    Projector() {}
    Projector(float fx, float fy, float cx, float cy);
    __kf_device__ float2 operator()(const float3 &p) const;
};
/* Inverse pinhole projection: maps pixel coordinates plus depth z back to a
 * camera-space 3D point. finv = (1/fx, 1/fy), c = principal point. */
struct Reprojector {
    Reprojector() {}
    Reprojector(float fx, float fy, float cx, float cy);
    float2 finv, c;
    __kf_device__ float3 operator()(int x, int y, float z) const;
};
/* Marching-cubes helper: reads the TSDF values at a voxel's eight corners
 * (into f[8]) and computes the cube configuration index for the isosurface
 * at isoValue (fixed to 0 by the constructor). */
struct CubeIndexEstimator {
    CubeIndexEstimator(const TsdfVolume &vol) : volume(vol) { isoValue = 0.f; }
    const TsdfVolume volume;
    float isoValue; // isosurface level; 0 = zero crossing of the TSDF
    __kf_device__ int computeCubeIndex(int x, int y, int z, float f[8]) const;
    __kf_device__ void readTsdf(int x, int y, int z, float &f, float &weight) const;
};
/* Marching-cubes pass 1 functor: scans the volume and records, for every
 * voxel the isosurface crosses, its id and its vertex count (see
 * getOccupiedVoxels below for the output-buffer layout). */
struct OccupiedVoxels : public CubeIndexEstimator {
    OccupiedVoxels(const TsdfVolume &vol) : CubeIndexEstimator({vol}) {}
    enum {
        CTA_SIZE_X = 32,
        CTA_SIZE_Y = 8,
        CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
        LOG_WARP_SIZE = 5,
        WARP_SIZE = 1 << LOG_WARP_SIZE,
        WARPS_COUNT = CTA_SIZE / WARP_SIZE,
    };
    mutable int *voxels_indices;  // output: ids of occupied voxels
    mutable int *vertices_number; // output: vertex count per occupied voxel
    int max_size;                 // capacity of the output buffers
    __kf_device__ void operator()() const;
};
/* Marching-cubes pass 2 functor: emits triangle vertices and normals for each
 * occupied voxel, writing at the per-voxel offsets computed beforehand (see
 * computeOffsetsAndTotalVertices / generateTriangles below). */
struct TrianglesGenerator : public CubeIndexEstimator {
    TrianglesGenerator(const TsdfVolume &vol) : CubeIndexEstimator({vol}) {}
    enum { CTA_SIZE = 256, MAX_GRID_SIZE_X = 65536 };
    const int *occupied_voxels; // pass-1 output: ids of occupied voxels
    const int *vertex_ofssets;  // per-voxel offsets into the output arrays
    int voxels_count;           // number of occupied voxels
    float3 cell_size;           // metric size of one voxel cell
    Aff3f pose;                 // transform applied to generated geometry
    mutable device::PointType *outputVertices;
    mutable device::PointType *outputNormals;
    __kf_device__ void operator()() const;
    __kf_device__ void store_point(float4 *ptr, int index, const float3 &vertex) const;
    __kf_device__ float3 get_node_coo(int x, int y, int z) const;
    /* Linear interpolation of the isosurface crossing on the edge between
     * corners p0/p1 with field values f0/f1. */
    __kf_device__ float3 vertex_interp(float3 p0, float3 p1, float f0, float f1) const;
};
/* Per-level state and kernels for projective ICP: data association between
 * the current and previous frames plus the partial reduction of the normal
 * equations into `buffer`. */
struct ComputeIcpHelper {
    struct Policy;
    /* Page-locked host buffer for downloading reduction results. */
    struct PageLockHelper {
        float *data;
        PageLockHelper();
        ~PageLockHelper();
    };
    float min_cosine;  // correspondence gate derived from angle_thres
    float dist2_thres; // correspondence gate: squared distance threshold
    Aff3f aff;         // current pose estimate used for association
    float rows, cols;  // image dimensions at this pyramid level
    float2 f, c, finv; // intrinsics: focal lengths, principal point, inverse focals
    PtrStep dcurr;     // current depth map (element type stripped in this copy)
    PtrStep ncurr;     // current normal map
    PtrStep vcurr;     // current vertex map
    ComputeIcpHelper(float dist_thres, float angle_thres);
    /* Selects the intrinsics for the given pyramid level. */
    void setLevelIntr(int level_index, float fx, float fy, float cx, float cy);
    /* Runs association + partial reduction against depth or point/normal maps. */
    void operator()(const Depth &dprev, const Normals &nprev, DeviceArray2D &buffer, float *data,
                    cudaStream_t stream);
    void operator()(const Points &vprev, const Normals &nprev, DeviceArray2D &buffer, float *data,
                    cudaStream_t stream);
    static void allocate_buffer(DeviceArray2D &buffer, int partials_count = -1);
    // private:
    __kf_device__ int find_coresp(int x, int y, float3 &n, float3 &d, float3 &s) const;
    __kf_device__ void partial_reduce(const float row[7], PtrStep &partial_buffer) const;
    __kf_device__ float2 proj(const float3 &p) const;
    __kf_device__ float3 reproj(float x, float y, float z) const;
};
/*
 * TSDF FUNCTIONS
 */
/** \brief resets every voxel of the volume */
void clear_volume(TsdfVolume &volume);
/** \brief fuses the warped volume phi_n_psi into the global volume */
void integrate(TsdfVolume &phi_global, TsdfVolume &phi_n_psi);
/** \brief fuses a depth frame into the volume (aff: camera pose, proj: intrinsics) */
void integrate(const Dists &depth, TsdfVolume &volume, const Aff3f &aff, const Projector &proj);
/* analytic SDF initialisers (box half-extents, ellipsoid radii, plane height,
 * sphere centre/radius, torus parameters) */
void init_box(TsdfVolume &volume, const float3 &b);
void init_ellipsoid(TsdfVolume &volume, const float3 &r);
void init_plane(TsdfVolume &volume, const float &z);
void init_sphere(TsdfVolume &volume, const float3 &centre, const float &radius);
void init_torus(TsdfVolume &volume, const float2 &t);
/*
 * MARCHING CUBES FUNCTIONS
 */
/** \brief binds marching cubes tables to texture references */
void bindTextures(const int *edgeBuf, const int *triBuf, const int *numVertsBuf);
/** \brief unbinds the marching cubes tables */
void unbindTextures();
/** \brief scans TSDF volume and retrieves occupied voxels
 * \param[in] volume TSDF volume
 * \param[out] occupied_voxels buffer for occupied voxels; the function fills the first row with voxel id's and second
 * row with the no. of vertices
 * \return number of voxels in the buffer
 */
int getOccupiedVoxels(const TsdfVolume &volume, DeviceArray2D &occupied_voxels);
/** \brief computes total number of vertices for all voxels and offsets of vertices in final triangle array
 * \param[out] occupied_voxels buffer with occupied voxels; the function fills the 3rd row only, with offsets
 * \return total number of vertices
 */
int computeOffsetsAndTotalVertices(DeviceArray2D &occupied_voxels);
/** \brief generates final triangle array
 * \param[in] volume TSDF volume
 * \param[in] occupied_voxels occupied voxel ids (1st row), number of vertices (2nd row), offsets (3rd row).
 * \param[in] volume_size volume size in meters
 * \param[in] pose transform applied to the generated geometry
 * \param[out] outputVertices triangle vertex array
 * \param[out] outputNormals per-vertex normal array
 */
void generateTriangles(const TsdfVolume &volume, const DeviceArray2D &occupied_voxels, const float3 &volume_size,
                       const Aff3f &pose, DeviceArray &outputVertices,
                       DeviceArray &outputNormals);
/*
 * IMAGE PROCESSING FUNCTIONS
 */
/** \brief converts a raw depth map to per-pixel distances along the viewing rays */
void compute_dists(const Depth &depth, Dists dists, float2 f, float2 c);
/** \brief zeroes depth readings beyond max_dist */
void truncateDepth(Depth &depth, float max_dist /*meters*/);
/** \brief edge-preserving bilateral filter on the depth map */
void bilateralFilter(const Depth &src, Depth &dst, int kernel_size, float sigma_spatial, float sigma_depth);
/** \brief downsamples the depth map one pyramid level */
void depthPyr(const Depth &source, Depth &pyramid, float sigma_depth);
void resizeDepthNormals(const Depth &depth, const Normals &normals, Depth &depth_out, Normals &normals_out);
void resizePointsNormals(const Points &points, const Normals &normals, Points &points_out, Normals &normals_out);
void computeNormalsAndMaskDepth(const Reprojector &reproj, Depth &depth, Normals &normals);
void computePointNormals(const Reprojector &reproj, const Depth &depth, Points &points, Normals &normals);
/** \brief rasterises the surface extracted via marching cubes
 * \param[in] proj camera projection
 * \param[in] vol2cam volume-to-camera transform
 * \param[in] surface triangle mesh to rasterise
 * \param[out] vertices_out rasterised vertex map
 * \param[out] normals_out rasterised normal map
 */
void rasteriseSurface(const Projector &proj, const Aff3f &vol2cam, const Surface &surface, Points &vertices_out,
                      Normals &normals_out);
} // namespace device
} // namespace kfusion
================================================
FILE: include/kfusion/kinfu.hpp
================================================
#pragma once
/* boost includes */
#include
/* kinfu includes */
#include
#include
#include
#include
/* pcl includes */
#include
#include
#include
#include
#include
/* sys headers */
#include
#include
namespace kfusion {
namespace cuda {
/* CUDA device enumeration/selection helpers (thin wrappers over the CUDA
 * runtime device-management API). */
KF_EXPORTS int getCudaEnabledDeviceCount();
KF_EXPORTS void setDevice(int device);
KF_EXPORTS std::string getDeviceName(int device);
KF_EXPORTS bool checkIfPreFermiGPU(int device);
KF_EXPORTS void printCudaDeviceInfo(int device);
KF_EXPORTS void printShortCudaDeviceInfo(int device);
} // namespace cuda
/* Configuration for the KinectFusion pipeline; default_params() supplies a
 * working baseline. Units are noted per field. */
struct KF_EXPORTS KinFuParams {
    static KinFuParams default_params();
    int cols;  // pixels
    int rows;  // pixels
    Intr intr; // Camera parameters
    Vec3i volume_dims; // number of voxels
    Vec3f volume_size; // meters
    Affine3f volume_pose; // meters, initial pose
    float bilateral_sigma_depth;   // meters
    float bilateral_sigma_spatial; // pixels
    int bilateral_kernel_size;     // pixels
    float icp_truncate_depth_dist; // meters
    float icp_dist_thres;          // meters
    float icp_angle_thres;         // radians
    std::vector icp_iter_num; // iterations for level index 0,1,..,3
    float tsdf_min_camera_movement; // meters, integrate only if exceeds
    float tsdf_trunc_dist;          // meters;
    float tsdf_max_weight;          // frames
    float raycast_step_factor;   // in voxel sizes
    float gradient_delta_factor; // in voxel sizes
    Vec3f light_pose; // meters
};
/** \brief KinectFusion pipeline facade: owns the TSDF volume, the projective
 * ICP tracker and the marching-cubes extractor, and processes depth frames
 * one at a time via operator().
 * (Fix: mis-encoded `&` reference tokens in three declarations restored.) */
class KF_EXPORTS KinFu {
public:
    typedef cv::Ptr Ptr;
    KinFu(const KinFuParams &params);
    /* read-only / mutable access to the pipeline configuration */
    const KinFuParams &params() const;
    KinFuParams &params();
    /* access to the pipeline stages */
    const cuda::TsdfVolume &tsdf() const;
    cuda::TsdfVolume &tsdf();
    const cuda::ProjectiveICP &icp() const;
    cuda::ProjectiveICP &icp();
    const cuda::MarchingCubes &mc() const;
    cuda::MarchingCubes &mc();
    /* Resets the volume and the tracked trajectory. */
    void reset();
    /* Processes one depth (and optional color) frame; the bool result
     * presumably reports tracking success — confirm in kinfu.cpp. */
    bool operator()(const cuda::Depth &depth, const cuda::Image &image = cuda::Image());
    /* Renders the current model from the tracked or a user-supplied pose. */
    void renderImage(cuda::Image &image, int flag = 0);
    void renderImage(cuda::Image &image, const Affine3f &pose, int flag = 0);
    /* Camera pose at frame index `time` (default -1 — presumably the most
     * recent pose; confirm in kinfu.cpp). */
    Affine3f getCameraPose(int time = -1) const;
protected:
    void allocate_buffers();
    int frame_counter_;        // number of frames processed so far
    KinFuParams params_;
    std::vector poses_;        // camera trajectory (element type stripped in this copy)
    cuda::Dists dists_;
    cuda::Frame curr_, prev_;  // current / previous frame pyramids
    cuda::Cloud points_;
    cuda::Normals normals_;
    cuda::Depth depths_;
    cv::Ptr volume_;           // TSDF volume
    cv::Ptr icp_;              // projective ICP tracker
    cv::Ptr mc_;               // marching-cubes extractor
};
} // namespace kfusion
================================================
FILE: include/kfusion/precomp.hpp
================================================
#pragma once
/* cuda includes */
#include
/* kinfu includes */
#include
#include
#include
#include