[
  {
    "path": ".clang-format",
    "content": "---\n# roscpp style file taken from https://github.com/PickNikRobotics/roscpp_code_format.git\n\nBasedOnStyle: Google\nAccessModifierOffset: -2\nConstructorInitializerIndentWidth: 2\nAlignEscapedNewlinesLeft: false\nAlignTrailingComments: true\nAllowAllParametersOfDeclarationOnNextLine: false\nAllowShortIfStatementsOnASingleLine: false\nAllowShortLoopsOnASingleLine: false\nAllowShortFunctionsOnASingleLine: None\nAlwaysBreakTemplateDeclarations: true\nAlwaysBreakBeforeMultilineStrings: true\nBreakBeforeBinaryOperators: false\nBreakBeforeTernaryOperators: false\nBreakConstructorInitializersBeforeComma: true\nBinPackParameters: true\nColumnLimit: 120\nConstructorInitializerAllOnOneLineOrOnePerLine: true\nDerivePointerBinding: false\nPointerBindsToType: true\nExperimentalAutoDetectBinPacking: false\nIndentCaseLabels: true\nMaxEmptyLinesToKeep: 1\nNamespaceIndentation: None\nObjCSpaceBeforeProtocolList: true\nPenaltyBreakBeforeFirstCallParameter: 19\nPenaltyBreakComment: 60\nPenaltyBreakString: 1\nPenaltyBreakFirstLessLess: 1000\nPenaltyExcessCharacter: 1000\nPenaltyReturnTypeOnItsOwnLine: 90\nSpacesBeforeTrailingComments: 2\nCpp11BracedListStyle: false\nStandard: Auto\nIndentWidth: 2\nTabWidth: 2\nUseTab: Never\nIndentFunctionDeclarationAfterType: false\nSpacesInParentheses: false\nSpacesInAngles: false\nSpaceInEmptyParentheses: false\nSpacesInCStyleCastParentheses: false\nSpaceAfterControlStatementKeyword: true\nSpaceBeforeAssignmentOperators: true\nContinuationIndentWidth: 4\nSortIncludes: false\nSpaceAfterCStyleCast: false\n\n# Configure each individual brace in BraceWrapping\nBreakBeforeBraces: Custom\n\n# Control of individual brace wrapping cases\nBraceWrapping: {\n    AfterClass: 'true'\n    AfterControlStatement: 'true'\n    AfterEnum : 'true'\n    AfterFunction : 'true'\n    AfterNamespace : 'true'\n    AfterStruct : 'true'\n    AfterUnion : 'true'\n    BeforeCatch : 'true'\n    BeforeElse : 'true'\n    IndentBraces : 'false'\n}\n...\n"
  },
  {
    "path": ".gitattributes",
    "content": "*.npz filter=lfs diff=lfs merge=lfs -text\n"
  },
  {
    "path": ".gitignore",
    "content": "# Prerequisites\n*.d\n\n# Compiled Object files\n*.slo\n*.lo\n*.o\n*.obj\n\n# Precompiled Headers\n*.gch\n*.pch\n\n# Compiled Dynamic libraries\n*.so\n*.dylib\n*.dll\n\n# Fortran module files\n*.mod\n*.smod\n\n# Compiled Static libraries\n*.lai\n*.la\n*.a\n*.lib\n\n# Executables\n*.exe\n*.out\n*.app\n\n# clion ide\n.idea\ncmake-build-debug*\ncmake-build-release*\n.clion*\n\n# vscode ide\n.vscode\n\n# Build directories\nbuild\n\n# clangd build\n.clangd\ncompile_commands.json\n.cache\n\n# resouces folder\nresource\n\n# video files\n*.mp4\n*.mpeg\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"submodules/cnpy\"]\n  path = submodules/cnpy\n  url = https://github.com/ACDSLab/cnpy.git\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "if (CMAKE_VERSION VERSION_LESS 3.12)\n  cmake_minimum_required(VERSION 3.8)\nelse()\n  cmake_minimum_required(VERSION 3.8...3.24)\nendif()\n\n# https://cmake.org/cmake/help/latest/policy/CMP0104.html\nif (CMAKE_VERSION VERSION_LESS 3.24 AND POLICY CMP0104)\n    cmake_policy(SET CMP0104 OLD)\nendif()\n# Get Version from file\nfile(READ \"${CMAKE_CURRENT_SOURCE_DIR}/VERSION\" MPPI_VERSION_FROM_FILE)\n# Clean up trailng whitespace and newlines\nstring(STRIP \"${MPPI_VERSION_FROM_FILE}\" MPPI_VERSION)\nproject(MPPI-Generic VERSION ${MPPI_VERSION} LANGUAGES C CXX CUDA)\n\n####################################\n# Set up options for configuration #\n####################################\noption(MPPI_BUILD_TESTS \"Build unit tests.\" OFF)\noption(MPPI_BUILD_EXAMPLES \"Build example code.\" OFF)\noption(MPPI_USE_CCACHE \"Use ccache (when available) to potentially speed up repeated builds\" ON)\noption(MPPI_USE_CUDA_BARRIERS \"Compile MPPI-Generic with cuda barrier support. Turn off for GPUs older than the 20 series.\" ON)\noption(MPPI_USE_CUDA_BARRIERS_COST \"Compile MPPI-Generic with cuda barriers for cost-only kernels when general barrier support is enabled.\" OFF)\noption(MPPI_USE_CUDA_BARRIERS_DYN \"Compile MPPI-Generic with cuda barriers for dynamics-only kernels when general barrier support is enabled.\" ON)\noption(MPPI_USE_CUDA_BARRIERS_ROLLOUT \"Compile MPPI-Generic with cuda barriers for combined kernels when general barrier support is enabled.\" ON)\noption(MPPI_EXPORT_PACKAGE \"Export this folder as the MPPI installation to the global CMake package repository\" OFF)\nset(MPPI_CUDA_ARCH_LIST \"\" CACHE STRING \"Specific CUDA Architectures to build for. 
Leave empty for automatic selection.\")\n\n# Configure CMake Cuda Flags for MPPI\nset(CMAKE_MODULE_PATH \"${CMAKE_MODULE_PATH}\" \"${CMAKE_CURRENT_SOURCE_DIR}/cmake\")\ninclude(MPPIGenericToolsConfig)\n\nset(BUILD_FLAGS \"-Wuninitialized -Wall\")\nset(CMAKE_CXX_FLAGS \"${CMAKE_CXX_FLAGS} ${BUILD_FLAGS}\")\nset(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -w\")\n\n# Allow the lib location to be overwritten from command line\nif (NOT CMAKE_LIBRARY_OUTPUT_DIRECTORY)\n    get_filename_component(PROJECT_LIBS_DIR ${PROJECT_BINARY_DIR}/lib ABSOLUTE)\n    set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/lib)\nelse()\n    get_filename_component(PROJECT_LIBS_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY} ABSOLUTE)\nendif()\n\n# set up cnpy\n# Don't install cnpy to user package registry\nset(TMP_PACKAGE_REGISTRY_EXPORT ${CMAKE_EXPORT_NO_PACKAGE_REGISTRY})\nset(CMAKE_EXPORT_NO_PACKAGE_REGISTRY ON)\nadd_subdirectory(submodules/cnpy)\n# Set the no package registry back to default\nset(CMAKE_EXPORT_NO_PACKAGE_REGISTRY ${TMP_PACKAGE_REGISTRY_EXPORT})\n\n# Find Eigen\nfind_package(Eigen3 REQUIRED)\n\n# REQUIRED for CUDA to correctly align Eigen member variables inside structures/classes\n# https://listengine.tuxfamily.org/lists.tuxfamily.org/eigen/2016/06/msg00006.html\n# add_definitions(-DEIGEN_MAX_STATIC_ALIGN_BYTES=0)\n\n# Generate Version Header File from Project Version\nconfigure_file(\"${PROJECT_SOURCE_DIR}/include/mppi/version.h.in\" \"${PROJECT_BINARY_DIR}/include/mppi/version.h\")\n\n# Create a Header-only MPPI Library\nadd_library(${MPPI_HEADER_LIBRARY_NAME} INTERFACE)\nadd_library(MPPI::MPPI ALIAS ${MPPI_HEADER_LIBRARY_NAME})\ntarget_include_directories(${MPPI_HEADER_LIBRARY_NAME} INTERFACE\n  ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES}\n  \"$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>\"\n  \"$<BUILD_INTERFACE:${PROJECT_BINARY_DIR}/include>\"\n  \"$<INSTALL_INTERFACE:include>\"\n)\n\ntarget_link_libraries(${MPPI_HEADER_LIBRARY_NAME} INTERFACE\n  
${MPPI_GENERIC_CUDA_EXTRA_LIBS}\n  ${CUDA_LIBRARIES}\n  cnpy\n  Eigen3::Eigen\n)\ntarget_include_directories(${MPPI_HEADER_LIBRARY_NAME} INTERFACE\n  \"$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/submodules/cnpy>\"\n  \"$<INSTALL_INTERFACE:include/cnpy>\"\n)\n\n# --display-error-number was added as a compilation flag in CUDA 11.2\nif (${CUDA_VERSION} VERSION_GREATER_EQUAL \"11.2\")\ntarget_compile_options(${MPPI_HEADER_LIBRARY_NAME} INTERFACE\n  $<$<COMPILE_LANGUAGE:CUDA>:--display-error-number>\n)\nendif()\n\n# Allow the ability to turn off cuda barriers through CMake. Necessary for GPUs older than\n# the 20 series\nif (MPPI_USE_CUDA_BARRIERS)\n  target_compile_definitions(${MPPI_HEADER_LIBRARY_NAME} INTERFACE\n    CMAKE_USE_CUDA_BARRIERS)\n  if (MPPI_USE_CUDA_BARRIERS_DYN)\n    target_compile_definitions(${MPPI_HEADER_LIBRARY_NAME} INTERFACE\n      CMAKE_USE_CUDA_BARRIERS_DYN)\n  endif()\n  if (MPPI_USE_CUDA_BARRIERS_ROLLOUT)\n    target_compile_definitions(${MPPI_HEADER_LIBRARY_NAME} INTERFACE\n      CMAKE_USE_CUDA_BARRIERS_ROLLOUT)\n  endif()\n  if (MPPI_USE_CUDA_BARRIERS_COST)\n    target_compile_definitions(${MPPI_HEADER_LIBRARY_NAME} INTERFACE\n      CMAKE_USE_CUDA_BARRIERS_COST)\n  endif()\nendif()\n\nif (MPPI_BUILD_TESTS OR MPPI_BUILD_EXAMPLES)\n    # find yaml-cpp\n    find_package(yaml-cpp REQUIRED)\n    include_directories(${YAML_CPP_INCLUDE_DIR})\nendif()\n\n# Install all library header files\ninstall(\n  DIRECTORY ${PROJECT_SOURCE_DIR}/include/mppi\n  DESTINATION include\n  PATTERN \"*.in\" EXCLUDE\n)\n\n# Install generated header file version.h\ninstall(\n  DIRECTORY ${PROJECT_BINARY_DIR}/include/mppi\n  DESTINATION include\n)\n\n###########################################\n# Set up CMake configuration installation #\n###########################################\nset(CMAKE_CONFIG_DEST \"lib/cmake/${PROJECT_NAME}\")\ninstall(\n  TARGETS ${MPPI_HEADER_LIBRARY_NAME}\n  EXPORT ${PROJECT_NAME}-targets\n)\n\ninstall(\n  EXPORT ${PROJECT_NAME}-targets\n  
NAMESPACE MPPI::\n  DESTINATION ${CMAKE_CONFIG_DEST}\n  FILE ${PROJECT_NAME}Targets.cmake\n)\n\ninstall(\n  EXPORT ${PROJECT_NAME}-targets\n  DESTINATION ${CMAKE_CONFIG_DEST}\n  FILE ${PROJECT_NAME}TargetsNoNamespace.cmake\n)\n\n# Set up MPPI to export the header library as MPPI in the MPPI namespace\nset_target_properties (${MPPI_HEADER_LIBRARY_NAME} PROPERTIES EXPORT_NAME MPPI)\nexport (TARGETS ${MPPI_HEADER_LIBRARY_NAME}\n        NAMESPACE MPPI::\n        FILE ${PROJECT_BINARY_DIR}/${PROJECT_NAME}Targets.cmake\n)\n\n# Export the package for use from the build-tree\n# (this registers the build-tree with a global CMake-registry ~/.cmake)\nif (MPPI_EXPORT_PACKAGE)\n  export(PACKAGE ${PROJECT_NAME})\nendif()\n\n#################################################################################\n# Create CMake Config and Version files and install in the appropriate location #\n#################################################################################\n\n# Create a CMake variable of the appropriate include directory depending on\n# whether it is consider from the build tree or the install tree\nset(TMP_MPPI_INCLUDE_DIRS \"${PROJECT_SOURCE_DIR}/include\")\nset(TMP_MPPI_TARGET_CMAKE_FILE \"${PROJECT_BINARY_DIR}/${PROJECT_NAME}Targets.cmake\")\n# Set up build tree config file\nconfigure_package_config_file(\n  ${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in\n  ${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake\n  INSTALL_DESTINATION ${PROJECT_BINARY_DIR}\n  PATH_VARS TMP_MPPI_INCLUDE_DIRS TMP_MPPI_TARGET_CMAKE_FILE\n)\n\n# Set up install tree config file\nset(TMP_MPPI_INCLUDE_DIRS \"include/\")\nset(TMP_MPPI_TARGET_CMAKE_FILE \"${CMAKE_CONFIG_DEST}/${PROJECT_NAME}Targets.cmake\")\nconfigure_package_config_file(\n  ${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in\n  ${PROJECT_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/${PROJECT_NAME}Config.cmake\n  INSTALL_DESTINATION ${CMAKE_CONFIG_DEST}\n  PATH_VARS TMP_MPPI_INCLUDE_DIRS 
TMP_MPPI_TARGET_CMAKE_FILE\n)\n\nwrite_basic_package_version_file(\n  ${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake\n  VERSION ${PROJECT_VERSION}\n  COMPATIBILITY SameMinorVersion\n)\n\nconfigure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/MPPIGenericToolsConfig.cmake\n  ${PROJECT_BINARY_DIR}/MPPIGenericToolsConfig.cmake\n  COPYONLY\n)\n\ninstall(FILES\n  ${PROJECT_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/${PROJECT_NAME}Config.cmake\n  ${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake\n  ${CMAKE_CURRENT_SOURCE_DIR}/cmake/MPPIGenericToolsConfig.cmake\n  DESTINATION ${CMAKE_CONFIG_DEST}\n)\n\nif (MPPI_BUILD_TESTS OR MPPI_BUILD_EXAMPLES)\n    add_subdirectory(src)\nendif()\nif (MPPI_BUILD_EXAMPLES)\n    add_subdirectory(examples)\nendif()\n\n# Add custom cmake finds\nset(CMAKE_MODULE_PATH \"${CMAKE_MODULE_PATH}\" \"${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules\")\n\n# Use CMake to download gtest as part of the configure step\n###################################################################\n# Add gtest\n###################################################################\nif (NOT DEFINED CMAKE_TOOLCHAIN_FILE AND MPPI_BUILD_TESTS)\n  message(STATUS \"Building MPPI Generic tests\")\n  enable_testing()\n  ############################################################\n  # copied from\n  # https://github.com/google/googletest/tree/master/googletest#incorporating-into-an-existing-cmake-project\n  ############################################################\n  # Download and unpack googletest at configure time\n  if (NOT TARGET gtest_main)\n    list(GET CMAKE_MODULE_PATH -1 MPPI_GENERIC_MODULES)\n    configure_file(${MPPI_GENERIC_MODULES}/CMakeLists.txt.gtest.in\n                   ${PROJECT_BINARY_DIR}/googletest-download/CMakeLists.txt)\n    execute_process(COMMAND ${CMAKE_COMMAND} -G \"${CMAKE_GENERATOR}\" .\n      RESULT_VARIABLE result\n      WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/googletest-download )\n    if(result)\n      message(FATAL_ERROR \"CMake step for 
googletest failed: ${result}\")\n    endif()\n    execute_process(COMMAND ${CMAKE_COMMAND} --build .\n      RESULT_VARIABLE result\n      WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/googletest-download )\n    if(result)\n      message(FATAL_ERROR \"Build step for googletest failed: ${result}\")\n    endif()\n\n    # Prevent overriding the parent project's compiler/linker\n    # settings on Windows\n    set(gtest_force_shared_crt ON CACHE BOOL \"\" FORCE)\n\n    # Prevent googletest installation\n    set(INSTALL_GTEST OFF)\n\n    # Add googletest directly to our build. This defines\n    # the gtest and gtest_main targets.\n    add_subdirectory(${PROJECT_BINARY_DIR}/googletest-src\n                     ${PROJECT_BINARY_DIR}/googletest-build)\n  endif()\n\n  include(GoogleTest)\n  add_subdirectory(tests)\nendif()\n\n# Uninstall\nif(NOT TARGET uninstall)\n  configure_file(\n    \"${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules/cmake_uninstall.cmake.in\"\n    \"${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake\"\n    IMMEDIATE @ONLY)\n\n  add_custom_target(uninstall\n    COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)\nendif()\n"
  },
  {
    "path": "LICENSE",
    "content": "BSD 2-Clause License\n\nCopyright (c) 2020, Georgia Institute of Technology\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n   list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n   this list of conditions and the following disclaimer in the documentation\n   and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "README.md",
    "content": "# MPPI-Generic\nMPPI-Generic is a C++/CUDA header-only library implementation of Model Predictive Path Integral Control (MPPI) by [Williams et al.](https://ieeexplore.ieee.org/document/8558663)\n\n## Citation\nIf you use this library for research purposes, please cite the following [paper](https://arxiv.org/abs/2409.07563):\n```\n@misc{vlahov2024mppi,\n      title={MPPI-Generic: A CUDA Library for Stochastic Trajectory Optimization},\n      author={Bogdan Vlahov and Jason Gibson and Manan Gandhi and Evangelos A. Theodorou},\n      year={2024},\n      eprint={2409.07563},\n      archivePrefix={arXiv},\n      primaryClass={cs.MS},\n      url={https://arxiv.org/abs/2409.07563},\n}\n```\n\n## Requirements\nMPPI-Generic relies on the following:\n* An NVIDIA GPU\n* GCC/G++\n* CUDA 10 or newer (CUDA 11.7+ is recommended but our library is compatible back to CUDA 10)\n* [Eigen](https://eigen.tuxfamily.org/index.php?title=Main_Page)\n* [CMake](https://cmake.org/) 3.10 or newer\n* git and git-lfs\n### Unit tests requirements\n* [yaml-cpp](https://github.com/jbeder/yaml-cpp)\n* python-pil\n\n## Prerequisite Setup (Ubuntu)\n1. Follow the instructions to install CUDA provided [here](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html).\n\n2. 
Install all the other prerequisites through `apt-get`:\n```bash\nsudo apt-get install libeigen3-dev git git-lfs cmake gcc\n# Setup git lfs if it is the first time you have installed it\ngit lfs install\n# extra installs if you are wanting to build unit tests\nsudo apt-get install libyaml-cpp-dev python3-pil\n```\n\nNote: If using Pop!\\_OS you can `sudo apt install system76-cuda` instead of installing CUDA manually.\n\n## Download the repo\n```bash\ncd /path/to/repos\ngit clone https://github.com/ACDSLab/MPPI-Generic.git\ncd MPPI-Generic\ngit submodule update --init --recursive\n```\n## Building MPPI-Generic with tests\n\nThe default is to build the library with tests OFF.\nIf you would like to turn on the tests when building, pass the flag `-DMPPI_BUILD_TESTS=ON` when configuring cmake.\n\n```bash\nmkdir build\ncd build\ncmake -DMPPI_BUILD_TESTS=ON ..\nmake\nmake test\n```\n\n## Acknowledgements\nApproved for Public Release, Distribution Unlimited.\n"
  },
  {
    "path": "VERSION",
    "content": "0.9.0\n"
  },
  {
    "path": "cmake/Config.cmake.in",
    "content": "# - Config file for the MPPI-Generic package\n# It defines the following variables\n#  MPPI_INCLUDE_DIRS - include directories for MPPI-Generic\n#  MPPI_INCLUDE_DIR  - include directories for MPPI-Generic\n#  MPPI_LIBRARIES    - libraries to link against\n\n@PACKAGE_INIT@\ninclude(CMakeFindDependencyMacro)\n\nset(MPPI_LIBRARIES \"MPPI::MPPI\")\nset_and_check(MPPI_INCLUDE_DIRS \"@PACKAGE_TMP_MPPI_INCLUDE_DIRS@\")\nset_and_check(MPPI_TARGET_CMAKE_FILE \"@PACKAGE_TMP_MPPI_TARGET_CMAKE_FILE@\")\n\n# Find dependencies of MPPI-Generic\nfind_dependency(cnpy REQUIRED HINTS ${PACKAGE_PREFIX_DIR})\nfind_dependency(Eigen3 REQUIRED)\n\n# Set up cmake targets and autodetection for CUDA architectures\ninclude(${MPPI_TARGET_CMAKE_FILE})\ninclude(\"${CMAKE_CURRENT_LIST_DIR}/MPPIGenericToolsConfig.cmake\")\n\nset(MPPI_INCLUDE_DIR ${MPPI_INCLUDE_DIRS})\ncheck_required_components(@PROJECT_NAME@)\n"
  },
  {
    "path": "cmake/MPPIGenericToolsConfig.cmake",
    "content": "if(NOT CMAKE_BUILD_TYPE)\n  set(CMAKE_BUILD_TYPE Release CACHE STRING \"Build type options: Release, RelWithDebInfo, Debug\" FORCE)\n  message(STATUS \"Setting Build Type to ${CMAKE_BUILD_TYPE} by default\")\nendif()\n\nif(NOT DEFINED CMAKE_CXX_STANDARD)\n  if (CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL \"13\")\n    # Cuda's libcu++ requires C++17 on CUDA 13+\n    set(CMAKE_CXX_STANDARD 17)\n  else()\n    set(CMAKE_CXX_STANDARD 11)\n  endif()\nendif()\nif (NOT DEFINED CMAKE_CUDA_STANDARD)\n  set(CMAKE_CUDA_STANDARD ${CMAKE_CXX_STANDARD})\nendif()\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\nset(CMAKE_CUDA_STANDARD_REQUIRED ON)\nset(CMAKE_EXPORT_COMPILE_COMMANDS ON)\n\n# Look for ccache to potentially speed up repeated compilations\nfind_program(CCACHE_PROGRAM ccache)\nif (CCACHE_PROGRAM AND NOT MPPI_USE_CCACHE)\n  message(STATUS \"Using ccache to speed up repeated builds\")\n  set(CMAKE_C_COMPILER_LAUNCHER ${CCACHE_PROGRAM})\n  set(CMAKE_CXX_COMPILER_LAUNCHER ${CCACHE_PROGRAM})\n  set(CMAKE_CUDA_COMPILER_LAUNCHER ${CCACHE_PROGRAM})\nelseif(NOT CCACHE_PROGRAM AND NOT MPPI_USE_CCACHE)\n  message(STATUS \"ccache not found. 
Using ccache can speed up repeated builds.\")\nendif()\n\n# Add debug flags so cuda-gdb can be used to stop inside a kernel.\n# NOTE: You may have to run make multiple times for it to compile successfully.\nset(CMAKE_CUDA_FLAGS_DEBUG \"${CMAKE_CUDA_FLAGS_DEBUG} -G --keep\")\nset(CMAKE_CUDA_FLAGS_RELWITHDEBINFO \"${CMAKE_CUDA_FLAGS_RELWITHDEBINFO} --generate-line-info\")\n\n# Generate variable for all the extra cuda libraries we use\nset(MPPI_GENERIC_CUDA_EXTRA_LIBS \"\")\nif (CMAKE_VERSION VERSION_LESS 3.24)\n  # required for curand on some systems\n  find_package(CUDA REQUIRED)\n\n  if(${CUDA_curand_LIBRARY} MATCHES \"NOTFOUND\")\n      message(ERROR \"cuRAND library not found.\")\n  endif()\n\n  list(APPEND MPPI_GENERIC_CUDA_EXTRA_LIBS ${CUDA_curand_LIBRARY} ${CUDA_CUFFT_LIBRARIES})\nelse()\n  find_package(CUDAToolkit REQUIRED)\n  set(CUDA_VERSION ${CUDAToolkit_VERSION})\n  list(APPEND MPPI_GENERIC_CUDA_EXTRA_LIBS CUDA::curand CUDA::cufft)\nendif()\n\n# Generate name for MPPI header library\nset(MPPI_HEADER_LIBRARY_NAME mppi_header_only_lib)\n\nset(CUDA_PROPAGATE_HOST_FLAGS OFF)\n\n#################################################################\n# Autodetect Cuda Architecture on system and add to executables #\n#################################################################\n# CMake 3.24 added '-arch=native' support so until that version, we need to use the old method of autodetection\nif (CMAKE_VERSION VERSION_LESS 3.24)\n  # Don't rerun autodetection when used as a submodule\n  if (NOT DEFINED MPPI_ARCH_FLAGS)\n    # More info for autodetection:\n    # https://stackoverflow.com/questions/35485087/determining-which-gencode-compute-arch-values-i-need-for-nvcc-within-cmak\n    CUDA_SELECT_NVCC_ARCH_FLAGS(MPPI_ARCH_FLAGS ${MPPI_CUDA_ARCH_LIST})\n\n    if (MPPI_ARCH_FLAGS STREQUAL \"\")\n      set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -maxrregcount=32 -arch=sm_35\")\n    else()\n      string(REGEX REPLACE \"-gencode;arch\" \"-gencode=arch\" MPPI_ARCH_FLAGS 
\"${MPPI_ARCH_FLAGS}\")\n      string(REPLACE \";\" \" \" MPPI_ARCH_FLAGS \"${MPPI_ARCH_FLAGS}\")\n      # string(REGEX REPLACE \"^-gencode=arch\" \"-arch\" MPPI_ARCH_FLAGS \"${MPPI_ARCH_FLAGS}\")\n      message(STATUS \"CUDA Architecture(s): ${MPPI_ARCH_FLAGS}\")\n      set(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} ${MPPI_ARCH_FLAGS}\")\n    endif()\n  else()\n    message(STATUS \"Autodetection already ran and found ${MPPI_ARCH_FLAGS}.\")\n  endif()\nelse()\n  # Don't rerun autodetection when used as a submodule\n  if (NOT DEFINED MPPI_ARCH_FLAGS)\n    if (\"${MPPI_CUDA_ARCH_LIST}\" STREQUAL \"\")\n      set(CMAKE_CUDA_ARCHITECTURES native)\n    else()\n      set(CMAKE_CUDA_ARCHITECTURES \"${MPPI_CUDA_ARCH_LIST}\")\n    endif()\n    set(MPPI_ARCH_FLAGS \"${CMAKE_CUDA_ARCHITECTURES}\")\n    message(STATUS \"CUDA Architecture(s): ${CMAKE_CUDA_ARCHITECTURES}\")\n  else()\n    message(STATUS \"Autodetection already ran and found ${MPPI_ARCH_FLAGS}.\")\n  endif()\nendif()\n"
  },
  {
    "path": "cmake/Modules/CMakeLists.txt.gtest.in",
    "content": "cmake_minimum_required(VERSION 2.8.2)\nproject(googletest-download NONE)\n\ninclude(ExternalProject)\nExternalProject_Add(googletest\n    GIT_REPOSITORY https://github.com/google/googletest.git\n    GIT_TAG release-1.12.1\n    SOURCE_DIR \"${PROJECT_BINARY_DIR}/googletest-src\"\n    BINARY_DIR \"${PROJECT_BINARY_DIR}/googletest-build\"\n    CONFIGURE_COMMAND \"\"\n    BUILD_COMMAND \"\"\n    INSTALL_COMMAND \"\"\n    TEST_COMMAND \"\"\n)\n"
  },
  {
    "path": "cmake/Modules/cmake_uninstall.cmake.in",
    "content": "# ============================================================================\n# Copyright (c) 2011-2012 University of Pennsylvania\n# Copyright (c) 2013-2014 Carnegie Mellon University\n# Copyright (c) 2013-2016 Andreas Schuh\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n#    list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n#    this list of conditions and the following disclaimer in the documentation\n#    and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# See COPYING file for license information or visit\n# https://cmake-basis.github.io/download.html#license\n# ============================================================================\n\n##############################################################################\n# @file  cmake_uninstall.cmake\n# @brief Uninstallation script based on install_manifest*.txt files.\n#\n# @ingroup CMakeTools\n##############################################################################\n\ncmake_minimum_required (VERSION 2.8.12 FATAL_ERROR)\n\n# ----------------------------------------------------------------------------\n# set the install prefix\nif (NOT DEFINED CMAKE_INSTALL_PREFIX)\n  set (CMAKE_INSTALL_PREFIX \"@CMAKE_INSTALL_PREFIX@\")\nendif ()\n\n# ----------------------------------------------------------------------------\n# set the install configuration name\nif (NOT DEFINED CMAKE_INSTALL_CONFIG_NAME)\n  if (BUILD_TYPE)\n    string (REGEX REPLACE \"^[^A-Za-z0-9_]+\" \"\" CMAKE_INSTALL_CONFIG_NAME \"${BUILD_TYPE}\")\n  else ()\n    set (CMAKE_INSTALL_CONFIG_NAME \"@CMAKE_BUILD_TYPE@\")\n  endif ()\n  message (STATUS \"Uninstall configuration: \\\"${CMAKE_INSTALL_CONFIG_NAME}\\\"\")\nendif ()\n\n# ----------------------------------------------------------------------------\n# set the component getting uninstalled\nif (NOT CMAKE_INSTALL_COMPONENT)\n  if (COMPONENT)\n    message (STATUS \"Uninstall component: \\\"${COMPONENT}\\\"\")\n    set 
(CMAKE_INSTALL_COMPONENT \"${COMPONENT}\")\n  else ()\n    set (CMAKE_INSTALL_COMPONENT)\n  endif ()\nendif ()\n\n# ----------------------------------------------------------------------------\n# read manifest file\nif (MANIFEST_FILE)\n  if (NOT EXISTS \"${MANIFEST_FILE}\")\n    message (FATAL_ERROR \"Manifest file ${MANIFEST_FILE} does not exist!\")\n  endif ()\n  set (MANIFEST_FILES \"${MANIFEST_FILE}\")\nelse ()\n  file (GLOB MANIFEST_FILES \"${CMAKE_CURRENT_LIST_DIR}/@PROJECT_PACKAGE_CONFIG_PREFIX@*InstallManifest.txt\")\n  if (NOT MANIFEST_FILES)\n    if (CMAKE_INSTALL_COMPONENT)\n      set (MANIFEST_FILE \"${CMAKE_CURRENT_LIST_DIR}/install_manifest_${CMAKE_INSTALL_COMPONENT}.txt\")\n    else ()\n      set (MANIFEST_FILE \"${CMAKE_CURRENT_LIST_DIR}/install_manifest.txt\")\n    endif ()\n    if (NOT EXISTS \"${MANIFEST_FILE}\")\n      message (\"No manifest file found.\")\n      return ()\n    endif ()\n    set (MANIFEST_FILES \"${MANIFEST_FILE}\")\n  endif ()\nendif ()\n\nset (MANIFEST)\nforeach (MANIFEST_FILE IN LISTS MANIFEST_FILES)\n  file (READ \"${MANIFEST_FILE}\" _MANIFEST)\n  string (REGEX REPLACE \"\\n\" \";\" _MANIFEST \"${_MANIFEST}\")\n  list (REVERSE _MANIFEST)\n  list (APPEND MANIFEST \"${_MANIFEST}\")\nendforeach ()\n\n# ----------------------------------------------------------------------------\n# remove package from CMake package registry\nset (REGISTERED \"@BASIS_REGISTER@\")\nif (WIN32 AND REGISTERED)\n  set (PKGUID \"@TOPLEVEL_PROJECT_PACKAGE_UID@\")\n  execute_process (\n    COMMAND reg delete \"HKCU\\\\Software\\\\Kitware\\\\CMake\\\\Packages\\\\@PROJECT_PACKAGE_CONFIG_PREFIX@\" /v \"${PKGUID}\" /f\n    RESULT_VARIABLE RT\n    ERROR_VARIABLE ERR\n  )\n  if (RT EQUAL 0)\n    message (STATUS \"Deregister:   Removed HKCU\\\\Software\\\\Kitware\\\\CMake\\\\Packages\\\\@PROJECT_PACKAGE_CONFIG_PREFIX@\\\\${PKGUID}\")\n  else ()\n    string (STRIP \"${ERR}\" ERR)\n    message (STATUS \"Deregister:   Failed to remove package from registry: 
${ERR}\")\n  endif ()\nendif ()\n\n# ----------------------------------------------------------------------------\n# remove installed files\nforeach (F ${MANIFEST}) # skip empty entries, i.e., blank lines\n  set (F \"$ENV{DESTDIR}${F}\") # support change of root\n  if (EXISTS \"${F}\")\n    set (FILE_IN_USE FALSE)\n    if (NOT FILE_IN_USE)\n      message (STATUS \"Uninstalling: ${F}\")\n      execute_process (COMMAND \"${CMAKE_COMMAND}\" -E remove -f \"${F}\" RESULT_VARIABLE RT)\n      if (NOT RT EQUAL 0)\n        set (OK FALSE)\n        message (STATUS \"Failed to uninstall ${F}\")\n      endif ()\n      # remove .pyc files of .py files\n      if (F MATCHES \"\\\\.py$\" AND EXISTS \"${F}c\")\n        message (STATUS \"Uninstalling: ${F}c\")\n        execute_process (COMMAND \"${CMAKE_COMMAND}\" -E remove -f \"${F}c\" RESULT_VARIABLE RT)\n        if (NOT RT EQUAL 0)\n          message (STATUS \"Failed to uninstall ${F}c\")\n        endif ()\n      endif ()\n    else ()\n      message (STATUS \"File-in-use:  ${F}\")\n    endif ()\n  else ()\n    message (STATUS \"Non-existent: ${F}\")\n  endif ()\nendforeach ()\n\nforeach (MANIFEST_FILE IN LISTS MANIFEST_FILES)\n  if (EXISTS \"${MANIFEST_FILE}\")\n    execute_process (COMMAND \"${CMAKE_COMMAND}\" -E remove -f \"${MANIFEST_FILE}\")\n  endif ()\nendforeach ()\n\n# ----------------------------------------------------------------------------\n# remove empty directories\nlist (APPEND EXCLUDE_DIRS\n  \"/\"\n  \"/usr\"\n  \"/usr/local\"\n  \"/opt\"\n  \"/opt/local\"\n  \"$ENV{HOME}\"\n  \"$ENV{HOME}/local\"\n  # these should anyway never be used as installation prefix without subdirectory\n  \"/bin\"\n  \"/boot\"\n  \"/dev\"\n  \"/etc\"\n  \"/home\"\n  \"/lib\"\n  \"/lib32\"\n  \"/lib64\"\n  \"/media\"\n  \"/mnt\"\n  \"/root\"\n  \"/proc\"\n  \"/sys\"\n  \"/var\"\n  \"/tmp\"\n  \"/lost+found\"\n  \"/cdrom\"\n)\n\nif (WIN32)\n  get_filename_component (PROGRAM_FILES_DIR 
\"[HKEY_LOCAL_MACHINE\\\\SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion;ProgramFilesDir]\" ABSOLUTE)\n  if (NOT PROGRAM_FILES_DIR OR PROGRAM_FILES_DIR MATCHES \"/registry\")\n    set (PROGRAM_FILES_DIR \"C:/Program Files\")\n  endif ()\n  list (APPEND EXCLUDE_DIRS \"${PROGRAM_FILES_DIR}\")\n  string (REPLACE \"/\" \"\\\\\" PROGRAM_FILES_DIR \"${PROGRAM_FILES_DIR}\")\n  list (APPEND EXCLUDE_DIRS \"${PROGRAM_FILES_DIR}\")\nendif ()\n\n# stop removing directories at root installation directory\n# the subdirectory will be still removed if it is not in the\n# list of excluded system directories\nget_filename_component (D \"$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}\" PATH)\nlist (APPEND EXCLUDE_DIRS \"${D}\")\n\nstring (REPLACE \".\" \"\\\\.\" CMAKE_INSTALL_PREFIX_RE \"$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}\")\nstring (REPLACE \".\" \"\\\\.\" CMAKE_REGISTRY_PREFIX_RE \"$ENV{HOME}/.cmake\")\n\nforeach (F ${MANIFEST}) # skip empty entries, i.e., blank lines\n  # remove directories only if file was installed inside the installation root\n  # or the CMake package registration on Unix\n  if (F MATCHES \"^${CMAKE_INSTALL_PREFIX_RE}\" OR\n        (UNIX AND F MATCHES \"^${CMAKE_REGISTRY_PREFIX_RE}\"))\n    get_filename_component (D \"$ENV{DESTDIR}${F}\" PATH)\n    while (D)\n      # skip directory if we removed it already\n      if (NOT EXISTS \"${D}\" OR NOT IS_DIRECTORY \"${D}\")\n        if (\"${D}\" STREQUAL \"$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}\")\n          return () # we are done, the installation root has been removed\n        endif ()\n        break ()\n      endif ()\n      # skip directory if it is in list of excluded directories\n      list (FIND EXCLUDE_DIRS \"${D}\" IDX)\n      if (NOT IDX EQUAL -1)\n        break ()\n      endif ()\n      # glob files in directory to make sure it is empty\n      file (GLOB FILES \"${D}/*\")\n      if (NOT FILES)\n        # remove directory\n        message (STATUS \"Uninstalling: ${D}\")\n        execute_process (COMMAND 
\"${CMAKE_COMMAND}\" -E remove_directory \"${D}\" RESULT_VARIABLE RT)\n        if (NOT RT EQUAL 0)\n          set (OK FALSE)\n          message (STATUS \"Failed to remove ${D}\")\n        endif ()\n      endif ()\n      if (\"${D}\" STREQUAL \"$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}\")\n        # we reached the root installation directory\n        break ()\n      endif ()\n      # proceed with parent directory\n      get_filename_component (D \"${D}\" PATH)\n    endwhile ()\n  endif ()\nendforeach ()\n"
  },
  {
    "path": "doc/feedback.md",
    "content": "# Overview\r\nThe overall structure for the Feedback Controller is a bit confusing but I will try to lay out the basic idea here.\r\nThere are two halves of the Feedback Controller, the GPU portion and the CPU portion.\r\nThese are separated into different classes as we need to do different things on the different devices.\r\nThe GPU portion just needs to provide feedback control while the CPU portion needs to do that and have the ability to update the feedback controller itself.\r\n\r\n\r\n# Steps to creating a new FeedbackController\r\n## Create a new FeedbackParams struct\r\nThis should be where the necessary data for the CPU portion of the controller resides.\r\nThis does not get copied over to the GPU so the struct can have Eigen matrices in it.\r\nFor example, this struct has the Q, Q_f, and R matrices for DDP and could have the desired poles for a PID controller.\r\n\r\n## Create  a new FeedbackState struct\r\nThis is the data structure that will contain everything necessary to actually compute feedback on the GPU and CPU.\r\nThis struct does get copied to the GPU so try to use float arrays of known sizes to prevent a need to rewrite the copy to GPU code.\r\nIt should also inherit from the GPUState struct in `feedback.cuh` as that has a necessary variable SHARED_MEM_SIZE that is used to allocate shared memory on the GPU for the feedback controller to use.\r\nThis space would be needed for keeping track of PID integrated errors for each sample of MPPI as an example.\r\nFor DDP, this is where the trajectory of feedback gains is stored, and for a PID controller, this would be where you put your P, I, and D gains.\r\n\r\n## Create a GPUFeedbackController Class\r\nThis should inherit from the GPUFeedbackController template in `feedback.cuh` as that provides a lot of methods already.\r\nYou at minimum need to write the `void k(x_act, x_goal, t, theta, control_output)` method to return the feedback control (`control output`) you would expect given 
the current state (`x_act`), the goal state (`x_goal`), the timestep you are on (`t`), and some potentially scratch space (`theta`).\r\nFor DDP, this ends up being essentially `feedback_gain_traj[t] * (x_act - x_goal)`  (no use for `theta`).\r\n\r\nThis class also has methods you can overwrite for copyToDevice() and copyFromDevice() if you have some fancy GPUFeedback or want to get diagnostic information back from the GPU.\r\n\r\n## Create a FeedbackController Class\r\nThis should inherit from the FeedbackController template in `feedback.cuh` which requires the GPUFeedback Class and the FeedbackParams struct you made earlier.\r\nThere are 3 methods at minimum you need to overwrite for this class.\r\n\r\n1. `void initTrackingController()`  is where you can do some setup that might not be done in the  constructor (DDP uses this to setup the ddp optimizer using the current params)\r\n2. `void computeFeedback(init_state, goal_state_traj, control_traj)` is where you can write how to update your feedback controller.\r\nFor DDP, this is where the optimization occurs and the new feedback gains trajectory is put into the DDPFeedbackState struct.\r\n3. `control_array k_(x_act, x_goal, t, fb_state)` is the CPU version of calculating the feedback control.\r\nIt has the FeedbackState passed in as there are times that the feedback control needs to be calculated from a different place than the internal FeedbackState (the internal state might be in the middle of updating for example).\r\nThe resulting control needs to be the same as the GPU Controller output given the same FeedbackState.\r\n\r\n# Conclusion\r\nAfter creating all of the components, you can then use this controller class as the feedback controller template argument in MPPI, Tube, RMPPI, and it \"should\" plug in without any problems. The DDP implementation is the only example at the moment so use that as a guiding resource if you get lost.\r\n"
  },
  {
    "path": "examples/CMakeLists.txt",
    "content": "# add_executable(cartpole_example cartpole_example.cpp)\n# target_include_directories(cartpole_example PUBLIC ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})\n# target_link_libraries(cartpole_example\n#                       cnpy\n#                       cartpole_mppi)\n\nfile(GLOB CUDA_TARGET_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cu)\n\nforeach(T_FILE IN LISTS CUDA_TARGET_SRCS)\n  # Get filename without extension to use as target name\n  get_filename_component(T_NAME ${T_FILE} NAME_WE)\n  add_executable(${T_NAME} ${T_FILE})\n  target_include_directories(${T_NAME} PUBLIC ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})\n  target_link_libraries(${T_NAME}\n                        ${MPPI_HEADER_LIBRARY_NAME}\n                        )\n  # set_target_properties(${T_NAME} PROPERTIES FOLDER test)\nendforeach()\n"
  },
  {
    "path": "examples/cartpole_example.cu",
    "content": "#include <mppi/instantiations/cartpole_mppi/cartpole_mppi.cuh>\n#include <iostream>\n#include <chrono>\n\nusing SAMPLER_T = mppi::sampling_distributions::GaussianDistribution<CartpoleDynamics::DYN_PARAMS_T>;\n\nint main(int argc, char** argv)\n{\n  auto model = new CartpoleDynamics(1.0, 1.0, 1.0);\n  auto cost = new CartpoleQuadraticCost;\n\n  model->control_rngs_->x = -5;\n  model->control_rngs_->y = 5;\n\n  CartpoleQuadraticCostParams new_params;\n  new_params.cart_position_coeff = 50;\n  new_params.pole_angle_coeff = 200;\n  new_params.cart_velocity_coeff = 10;\n  new_params.pole_angular_velocity_coeff = 1;\n  new_params.control_cost_coeff[0] = 0;\n  new_params.terminal_cost_coeff = 0;\n  new_params.desired_terminal_state[0] = 20;\n  new_params.desired_terminal_state[1] = 0;\n  new_params.desired_terminal_state[2] = M_PI;\n  new_params.desired_terminal_state[3] = 0;\n\n  cost->setParams(new_params);\n\n  float dt = 0.02;\n  int max_iter = 1;\n  float lambda = 0.25;\n  float alpha = 0.0;\n  const int num_timesteps = 100;\n\n  // Set up Gaussian Distribution\n  auto sampler_params = SAMPLER_T::SAMPLING_PARAMS_T();\n  for (int i = 0; i < CartpoleDynamics::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = 5.0;\n  }\n  auto sampler = new SAMPLER_T(sampler_params);\n\n  // Feedback Controller\n  auto fb_controller = new DDPFeedback<CartpoleDynamics, num_timesteps>(model, dt);\n\n  auto CartpoleController =\n      new VanillaMPPIController<CartpoleDynamics, CartpoleQuadraticCost, DDPFeedback<CartpoleDynamics, num_timesteps>,\n                                num_timesteps, 2048>(model, cost, fb_controller, sampler, dt, max_iter, lambda, alpha);\n  auto controller_params = CartpoleController->getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 4, 1);\n  controller_params.cost_rollout_dim_ = dim3(64, 4, 1);\n  CartpoleController->setParams(controller_params);\n\n  CartpoleDynamics::state_array current_state = model->getZeroState();\n  
CartpoleDynamics::state_array next_state = model->getZeroState();\n  CartpoleDynamics::output_array output = CartpoleDynamics::output_array::Zero();\n\n  int time_horizon = 5000;\n\n  CartpoleDynamics::state_array xdot = model->getZeroState();\n\n  auto time_start = std::chrono::system_clock::now();\n  for (int i = 0; i < time_horizon; ++i)\n  {\n    // Compute the control\n    CartpoleController->computeControl(current_state, 1);\n\n    // Increment the state\n    CartpoleDynamics::control_array control;\n    control = CartpoleController->getControlSeq().block(0, 0, CartpoleDynamics::CONTROL_DIM, 1);\n    model->enforceConstraints(current_state, control);\n    model->step(current_state, next_state, xdot, control, output, i, dt);\n    current_state = next_state;\n\n    if (i % 50 == 0)\n    {\n      printf(\"Current Time: %f    \", i * dt);\n      printf(\"Current Baseline Cost: %f    \", CartpoleController->getBaselineCost());\n      model->printState(current_state.data());\n      //      std::cout << control << std::endl;\n    }\n\n    // Slide the controls down before calling the optimizer again\n    CartpoleController->slideControlSequence(1);\n  }\n  auto time_end = std::chrono::system_clock::now();\n  auto diff = std::chrono::duration<double, std::milli>(time_end - time_start);\n  printf(\"The elapsed time is: %f milliseconds\\n\", diff.count());\n  //    std::cout << \"The current control at timestep \" << i << \" is: \" << CartpoleController.get_control_seq()[i] <<\n  //    std::endl;\n\n  // cost->freeCudaMem();\n  delete (CartpoleController);\n  delete (cost);\n  delete (model);\n  delete (fb_controller);\n  delete sampler;\n\n  return 0;\n}\n"
  },
  {
    "path": "examples/double_integrator_CORL2020.cu",
    "content": "#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_circle_cost.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_robust_cost.cuh>\n#include <mppi/controllers/MPPI/mppi_controller.cuh>\n#include <mppi/controllers/Tube-MPPI/tube_mppi_controller.cuh>\n#include <mppi/controllers/R-MPPI/robust_mppi_controller.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n\n#include <cnpy.h>\n#include <random>  // Used to generate random noise for control trajectories\n\nbool tubeFailure(float* s)\n{\n  float inner_path_radius2 = 1.675 * 1.675;\n  float outer_path_radius2 = 2.325 * 2.325;\n  float radial_position = s[0] * s[0] + s[1] * s[1];\n  if ((radial_position < inner_path_radius2) || (radial_position > outer_path_radius2))\n  {\n    return true;\n  }\n  else\n  {\n    return false;\n  }\n}\n\nusing Dyn = DoubleIntegratorDynamics;\nusing SCost = DoubleIntegratorCircleCost;\nusing RCost = DoubleIntegratorRobustCost;\nconst int num_timesteps = 50;  // Optimization time horizon\nconst int total_time_horizon = 5000;\nusing Feedback = DDPFeedback<Dyn, num_timesteps>;\nusing Sampler = mppi::sampling_distributions::GaussianDistribution<Dyn::DYN_PARAMS_T>;\n\n// Problem setup\nconst float dt = 0.02;   // Timestep of dynamics propagation\nconst int max_iter = 1;  // Maximum running iterations of optimization\nconst float lambda = 2;  // Learning rate parameter\nconst float alpha = 0.0;\n\ntypedef Eigen::Matrix<float, Dyn::STATE_DIM, num_timesteps> state_trajectory;\n\nvoid saveTraj(const Eigen::Ref<const state_trajectory>& traj, int t, std::vector<float>& vec)\n{\n  for (int i = 0; i < num_timesteps; i++)\n  {\n    for (int j = 0; j < Dyn::STATE_DIM; j++)\n    {\n      vec[t * num_timesteps * Dyn::STATE_DIM + i * Dyn::STATE_DIM + j] = traj(j, i);\n    }\n  }\n}\n\nvoid saveState(const Eigen::Ref<const Dyn::state_array>& state, int t, std::vector<float>& vec)\n{\n  for 
(int j = 0; j < Dyn::STATE_DIM; j++)\n  {\n    vec[t * Dyn::STATE_DIM + j] = state(j);\n  }\n}\n\nvoid runVanilla(const Eigen::Ref<const Eigen::Matrix<float, Dyn::STATE_DIM, total_time_horizon>>& noise)\n{\n  // Set the initial state\n  Dyn::state_array x;\n  x << 2, 0, 0, 1;\n  Dyn::state_array xdot;\n\n  // control variance\n  Sampler::SAMPLING_PARAMS_T sampler_params;\n  for (int i = 0; i < Dyn::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = 1;\n  }\n\n  // Save actual trajectories, nominal_trajectory, free energy\n  std::vector<float> van_trajectory(Dyn::STATE_DIM * total_time_horizon, 0);\n  std::vector<float> van_nominal_traj(Dyn::STATE_DIM * num_timesteps * total_time_horizon, 0);\n  std::vector<float> van_free_energy(total_time_horizon, 0);\n\n  // Initialize the controllers\n  Dyn model;\n  SCost cost;\n  Sampler sampler(sampler_params);\n  // DDP cost parameters\n  Feedback fb_controller(&model, dt);\n  auto fb_params = fb_controller.getParams();\n  fb_params.Q.diagonal() << 500, 500, 100, 100;\n  fb_controller.setParams(fb_params);\n  auto controller = VanillaMPPIController<Dyn, SCost, Feedback, num_timesteps, 1024, Sampler>(\n      &model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n  auto controller_params = controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 1, 1);\n  controller_params.cost_rollout_dim_ = dim3(64, 1, 1);\n  controller.setParams(controller_params);\n  controller.initFeedback();\n\n  // Start the loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    /********************** Vanilla **********************/\n    // Compute the control\n    controller.computeControl(x, 1);\n\n    // Compute the feedback gains\n    controller.computeFeedback(x);\n\n    // Propagate the feedback trajectory\n    controller.computeFeedbackPropagatedStateSeq();\n\n    auto nominal_trajectory = controller.getTargetStateSeq();\n    auto nominal_control = controller.getControlSeq();\n    auto fe_stat = 
controller.getFreeEnergyStatistics();\n\n    // Save everything\n    saveState(x, t, van_trajectory);\n    saveTraj(nominal_trajectory, t, van_nominal_traj);\n    van_free_energy[t] = fe_stat.real_sys.freeEnergyMean;\n\n    // Get the open loop control\n    DoubleIntegratorDynamics::control_array current_control = nominal_control.col(0);\n\n    // Apply the feedback given the current state\n    Dyn::control_array fb_control = controller.getFeedbackControl(x, nominal_trajectory.col(0), 0);\n    current_control += fb_control;\n\n    // Propagate the state forward\n    model.computeDynamics(x, current_control, xdot);\n    model.updateState(x, xdot, dt);\n\n    // Add disturbance\n    x += noise.col(t) * sqrt(model.getParams().system_noise) * dt;\n\n    // Slide the control sequence\n    controller.slideControlSequence(1);\n  }\n  /************* Save CNPY *********************/\n  cnpy::npy_save(\"vanilla_state_trajectory.npy\", van_trajectory.data(),\n                 { total_time_horizon, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"vanilla_nominal_trajectory.npy\", van_nominal_traj.data(),\n                 { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"vanilla_free_energy.npy\", van_free_energy.data(), { total_time_horizon }, \"w\");\n}\n\nvoid runVanillaLarge(const Eigen::Ref<const Eigen::Matrix<float, Dyn::STATE_DIM, total_time_horizon>>& noise)\n{\n  // Set the initial state\n  DoubleIntegratorDynamics::state_array x;\n  x << 2, 0, 0, 1;\n  DoubleIntegratorDynamics::state_array xdot;\n\n  // control variance\n  Sampler::SAMPLING_PARAMS_T sampler_params;\n  for (int i = 0; i < Dyn::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = 1;\n  }\n\n  // Save actual trajectories, nominal_trajectory, free energy\n  std::vector<float> van_large_trajectory(Dyn::STATE_DIM * total_time_horizon, 0);\n  std::vector<float> van_large_nominal_traj(Dyn::STATE_DIM * num_timesteps * 
total_time_horizon, 0);\n  std::vector<float> van_large_free_energy(total_time_horizon, 0);\n\n  // Initialize the controllers\n  Dyn model(100);\n  SCost cost;\n  Sampler sampler(sampler_params);\n  // DDP cost parameters\n  Feedback fb_controller(&model, dt);\n  auto fb_params = fb_controller.getParams();\n  fb_params.Q.diagonal() << 500, 500, 100, 100;\n  fb_controller.setParams(fb_params);\n  auto controller = VanillaMPPIController<Dyn, SCost, Feedback, num_timesteps, 1024, Sampler>(\n      &model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n  auto controller_params = controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 1, 1);\n  controller_params.cost_rollout_dim_ = dim3(64, 1, 1);\n  controller.setParams(controller_params);\n  controller.initFeedback();\n\n  // Start the loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    /********************** Vanilla Large **********************/\n    // Compute the control\n    controller.computeControl(x, 1);\n\n    // Compute the feedback gains\n    controller.computeFeedback(x);\n\n    // Propagate the feedback trajectory\n    controller.computeFeedbackPropagatedStateSeq();\n\n    auto nominal_trajectory = controller.getTargetStateSeq();\n    auto nominal_control = controller.getControlSeq();\n    auto fe_stat = controller.getFreeEnergyStatistics();\n\n    // Save everything\n    saveState(x, t, van_large_trajectory);\n    saveTraj(nominal_trajectory, t, van_large_nominal_traj);\n    van_large_free_energy[t] = fe_stat.real_sys.freeEnergyMean;\n\n    // Get the open loop control\n    DoubleIntegratorDynamics::control_array current_control = nominal_control.col(0);\n\n    // Apply the feedback given the current state\n    Dyn::control_array fb_control = controller.getFeedbackControl(x, nominal_trajectory.col(0), 0);\n    current_control += fb_control;\n\n    // Propagate the state forward\n    model.computeDynamics(x, current_control, xdot);\n    
model.updateState(x, xdot, dt);\n\n    // Add disturbance\n    x += noise.col(t) * sqrt(model.getParams().system_noise) * dt;\n\n    // Slide the control sequence\n    controller.slideControlSequence(1);\n  }\n  /************* Save CNPY *********************/\n  cnpy::npy_save(\"vanilla_large_state_trajectory.npy\", van_large_trajectory.data(),\n                 { total_time_horizon, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"vanilla_large_nominal_trajectory.npy\", van_large_nominal_traj.data(),\n                 { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"vanilla_large_free_energy.npy\", van_large_free_energy.data(), { total_time_horizon }, \"w\");\n}\n\nvoid runVanillaLargeRC(const Eigen::Ref<const Eigen::Matrix<float, Dyn::STATE_DIM, total_time_horizon>>& noise)\n{\n  // Set the initial state\n  DoubleIntegratorDynamics::state_array x;\n  x << 2, 0, 0, 1;\n  DoubleIntegratorDynamics::state_array xdot;\n\n  // control variance\n  Sampler::SAMPLING_PARAMS_T sampler_params;\n  for (int i = 0; i < Dyn::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = 1;\n  }\n\n  // Save actual trajectories, nominal_trajectory, free energy\n  std::vector<float> van_large_trajectory(Dyn::STATE_DIM * total_time_horizon, 0);\n  std::vector<float> van_large_nominal_traj(Dyn::STATE_DIM * num_timesteps * total_time_horizon, 0);\n  std::vector<float> van_large_free_energy(total_time_horizon, 0);\n\n  // Initialize the controllers\n  Dyn model(100);\n\n  RCost cost;\n  Sampler sampler(sampler_params);\n  auto params = cost.getParams();\n  params.crash_cost = 100;\n  cost.setParams(params);\n  // DDP cost parameters\n  Feedback fb_controller(&model, dt);\n  auto fb_params = fb_controller.getParams();\n  fb_params.Q.diagonal() << 500, 500, 100, 100;\n  fb_controller.setParams(fb_params);\n\n  auto controller = VanillaMPPIController<Dyn, RCost, Feedback, num_timesteps, 1024, Sampler>(\n      &model, 
&cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n  auto controller_params = controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 1, 1);\n  controller_params.cost_rollout_dim_ = dim3(64, 1, 1);\n  controller.setParams(controller_params);\n  controller.initFeedback();\n\n  // Start the loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    /********************** Vanilla Large **********************/\n    // Compute the control\n    controller.computeControl(x, 1);\n\n    // Compute the feedback gains\n    controller.computeFeedback(x);\n\n    // Propagate the feedback trajectory\n    controller.computeFeedbackPropagatedStateSeq();\n\n    auto nominal_trajectory = controller.getTargetStateSeq();\n    auto nominal_control = controller.getControlSeq();\n    auto fe_stat = controller.getFreeEnergyStatistics();\n\n    // Save everything\n    saveState(x, t, van_large_trajectory);\n    saveTraj(nominal_trajectory, t, van_large_nominal_traj);\n    van_large_free_energy[t] = fe_stat.real_sys.freeEnergyMean;\n\n    // Get the open loop control\n    DoubleIntegratorDynamics::control_array current_control = nominal_control.col(0);\n\n    // Apply the feedback given the current state\n    Dyn::control_array fb_control = controller.getFeedbackControl(x, nominal_trajectory.col(0), 0);\n    current_control += fb_control;\n\n    // Propagate the state forward\n    model.computeDynamics(x, current_control, xdot);\n    model.updateState(x, xdot, dt);\n\n    // Add disturbance\n    x += noise.col(t) * sqrt(model.getParams().system_noise) * dt;\n\n    // Slide the control sequence\n    controller.slideControlSequence(1);\n  }\n  /************* Save CNPY *********************/\n  cnpy::npy_save(\"vanilla_large_robust_state_trajectory.npy\", van_large_trajectory.data(),\n                 { total_time_horizon, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"vanilla_large_robust_nominal_trajectory.npy\", 
van_large_nominal_traj.data(),\n                 { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"vanilla_large_robust_free_energy.npy\", van_large_free_energy.data(), { total_time_horizon }, \"w\");\n}\n\nvoid runTube(const Eigen::Ref<const Eigen::Matrix<float, Dyn::STATE_DIM, total_time_horizon>>& noise)\n{\n  // Set the initial state\n  DoubleIntegratorDynamics::state_array x;\n  x << 2, 0, 0, 1;\n  DoubleIntegratorDynamics::state_array xdot;\n\n  // control variance\n  Sampler::SAMPLING_PARAMS_T sampler_params;\n  for (int i = 0; i < Dyn::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = 1;\n  }\n\n  // Save actual trajectories, nominal_trajectory, free energy\n  std::vector<float> tube_trajectory(Dyn::STATE_DIM * total_time_horizon, 0);\n  std::vector<float> tube_nominal_traj(Dyn::STATE_DIM * num_timesteps * total_time_horizon, 0);\n  std::vector<float> tube_nominal_free_energy(total_time_horizon, 0);\n  std::vector<float> tube_real_free_energy(total_time_horizon, 0);\n  std::vector<float> tube_nominal_state_used(total_time_horizon, 0);\n\n  // Initialize the controllers\n  Dyn model(100);\n  SCost cost;\n  Sampler sampler(sampler_params);\n  // DDP cost parameters\n  Feedback fb_controller(&model, dt);\n  auto fb_params = fb_controller.getParams();\n  fb_params.Q.diagonal() << 500, 500, 100, 100;\n  fb_controller.setParams(fb_params);\n  auto controller = TubeMPPIController<Dyn, SCost, Feedback, num_timesteps, 1024, Sampler>(\n      &model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n  auto controller_params = controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 1, 1);\n  controller_params.cost_rollout_dim_ = dim3(64, 1, 1);\n  controller.setParams(controller_params);\n  controller.setNominalThreshold(20);\n  // Start the loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    /********************** Tube **********************/\n    // Compute the 
control\n    controller.computeControl(x, 1);\n\n    // Compute the feedback gains\n    controller.computeFeedback(x);\n\n    // Propagate the feedback trajectory\n    controller.computeFeedbackPropagatedStateSeq();\n\n    auto nominal_trajectory = controller.getTargetStateSeq();\n    auto nominal_control = controller.getControlSeq();\n    auto fe_stat = controller.getFreeEnergyStatistics();\n\n    // Save everything\n    saveState(x, t, tube_trajectory);\n    saveTraj(nominal_trajectory, t, tube_nominal_traj);\n    tube_nominal_free_energy[t] = fe_stat.nominal_sys.freeEnergyMean;\n    tube_real_free_energy[t] = fe_stat.real_sys.freeEnergyMean;\n    tube_nominal_state_used[t] = fe_stat.nominal_state_used;\n\n    // Get the open loop control\n    DoubleIntegratorDynamics::control_array current_control = nominal_control.col(0);\n\n    // Apply the feedback given the current state\n    Dyn::control_array fb_control = controller.getFeedbackControl(x, nominal_trajectory.col(0), 0);\n    current_control += fb_control;\n\n    // Propagate the state forward\n    model.computeDynamics(x, current_control, xdot);\n    model.updateState(x, xdot, dt);\n    controller.updateNominalState(current_control);\n\n    // Add disturbance\n    x += noise.col(t) * sqrt(model.getParams().system_noise) * dt;\n\n    // Slide the control sequence\n    controller.slideControlSequence(1);\n  }\n  /************* Save CNPY *********************/\n  cnpy::npy_save(\"tube_state_trajectory.npy\", tube_trajectory.data(),\n                 { total_time_horizon, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"tube_nominal_trajectory.npy\", tube_nominal_traj.data(),\n                 { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"tube_nominal_free_energy.npy\", tube_nominal_free_energy.data(), { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"tube_real_free_energy.npy\", tube_real_free_energy.data(), { total_time_horizon }, 
\"w\");\n  cnpy::npy_save(\"tube_nominal_state_used.npy\", tube_nominal_state_used.data(), { total_time_horizon }, \"w\");\n}\n\nvoid runTubeRC(const Eigen::Ref<const Eigen::Matrix<float, Dyn::STATE_DIM, total_time_horizon>>& noise)\n{\n  // Set the initial state\n  DoubleIntegratorDynamics::state_array x;\n  x << 2, 0, 0, 1;\n  DoubleIntegratorDynamics::state_array xdot;\n\n  // control variance\n  Sampler::SAMPLING_PARAMS_T sampler_params;\n  for (int i = 0; i < Dyn::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = 1;\n  }\n\n  // Save actual trajectories, nominal_trajectory, free energy\n  std::vector<float> tube_trajectory(Dyn::STATE_DIM * total_time_horizon, 0);\n  std::vector<float> tube_nominal_traj(Dyn::STATE_DIM * num_timesteps * total_time_horizon, 0);\n  std::vector<float> tube_nominal_free_energy(total_time_horizon, 0);\n  std::vector<float> tube_real_free_energy(total_time_horizon, 0);\n  std::vector<float> tube_nominal_state_used(total_time_horizon, 0);\n\n  // Initialize the controllers\n  Dyn model(100);\n  RCost cost;\n  Sampler sampler(sampler_params);\n  auto params = cost.getParams();\n  params.crash_cost = 100;\n  cost.setParams(params);\n  // DDP cost parameters\n  Feedback fb_controller(&model, dt);\n  auto fb_params = fb_controller.getParams();\n  fb_params.Q.diagonal() << 500, 500, 100, 100;\n  fb_controller.setParams(fb_params);\n  auto controller = TubeMPPIController<Dyn, RCost, Feedback, num_timesteps, 1024>(\n      &model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n  auto controller_params = controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 1, 1);\n  controller_params.cost_rollout_dim_ = dim3(64, 1, 1);\n  controller.setParams(controller_params);\n  controller.setNominalThreshold(2);\n  // Start the loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    /********************** Tube **********************/\n    // Compute the control\n    controller.computeControl(x, 1);\n\n 
   // Compute the feedback gains\n    controller.computeFeedback(x);\n\n    // Propagate the feedback trajectory\n    controller.computeFeedbackPropagatedStateSeq();\n\n    auto nominal_trajectory = controller.getTargetStateSeq();\n    auto nominal_control = controller.getControlSeq();\n    auto fe_stat = controller.getFreeEnergyStatistics();\n\n    // Save everything\n    saveState(x, t, tube_trajectory);\n    saveTraj(nominal_trajectory, t, tube_nominal_traj);\n    tube_nominal_free_energy[t] = fe_stat.nominal_sys.freeEnergyMean;\n    tube_real_free_energy[t] = fe_stat.real_sys.freeEnergyMean;\n    tube_nominal_state_used[t] = fe_stat.nominal_state_used;\n\n    // Get the open loop control\n    DoubleIntegratorDynamics::control_array current_control = nominal_control.col(0);\n\n    // Apply the feedback given the current state\n    Dyn::control_array fb_control = controller.getFeedbackControl(x, nominal_trajectory.col(0), 0);\n    current_control += fb_control;\n\n    // Propagate the state forward\n    model.computeDynamics(x, current_control, xdot);\n    model.updateState(x, xdot, dt);\n    controller.updateNominalState(current_control);\n\n    // Add disturbance\n    x += noise.col(t) * sqrt(model.getParams().system_noise) * dt;\n\n    // Slide the control sequence\n    controller.slideControlSequence(1);\n  }\n  /************* Save CNPY *********************/\n  cnpy::npy_save(\"tube_robust_state_trajectory.npy\", tube_trajectory.data(),\n                 { total_time_horizon, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"tube_robust_nominal_trajectory.npy\", tube_nominal_traj.data(),\n                 { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"tube_robust_nominal_free_energy.npy\", tube_nominal_free_energy.data(), { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"tube_robust_real_free_energy.npy\", tube_real_free_energy.data(), { total_time_horizon }, \"w\");\n  
cnpy::npy_save(\"tube_robust_nominal_state_used.npy\", tube_nominal_state_used.data(), { total_time_horizon }, \"w\");\n}\n\nvoid runRobustSc(const Eigen::Ref<const Eigen::Matrix<float, Dyn::STATE_DIM, total_time_horizon>>& noise)\n{\n  // Set the initial state\n  DoubleIntegratorDynamics::state_array x;\n  x << 2, 0, 0, 1;\n  DoubleIntegratorDynamics::state_array xdot;\n\n  // control variance\n  Sampler::SAMPLING_PARAMS_T sampler_params;\n  for (int i = 0; i < Dyn::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = 1;\n  }\n\n  // Save actual trajectories, nominal_trajectory, free energy\n  std::vector<float> robust_sc_trajectory(Dyn::STATE_DIM * total_time_horizon, 0);\n  std::vector<float> robust_sc_nominal_traj(Dyn::STATE_DIM * num_timesteps * total_time_horizon, 0);\n  std::vector<float> robust_sc_nominal_free_energy(total_time_horizon, 0);\n  std::vector<float> robust_sc_real_free_energy(total_time_horizon, 0);\n  std::vector<float> robust_sc_nominal_free_energy_bound(total_time_horizon, 0);\n  std::vector<float> robust_sc_real_free_energy_bound(total_time_horizon, 0);\n  std::vector<float> robust_sc_real_free_energy_growth_bound(total_time_horizon, 0);\n  std::vector<float> robust_sc_nominal_state_used(total_time_horizon, 0);\n\n  // Initialize the controllers\n  Dyn model(100);\n  SCost cost;\n  Sampler sampler(sampler_params);\n  // DDP cost parameters\n  Feedback fb_controller(&model, dt);\n  auto fb_params = fb_controller.getParams();\n  fb_params.Q.diagonal() << 500, 500, 100, 100;\n  fb_controller.setParams(fb_params);\n  // Value function threshold\n  float value_function_threshold = 20.0;\n  auto controller = RobustMPPIController<Dyn, SCost, Feedback, num_timesteps, 1024, Sampler>(\n      &model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha, value_function_threshold);\n  auto controller_params = controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 1, 1);\n  controller_params.cost_rollout_dim_ = dim3(64, 
1, 1);\n  controller.setParams(controller_params);\n\n  // Start the loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    /********************** Vanilla **********************/\n    // Compute the control\n    controller.updateImportanceSamplingControl(x, 1);\n    controller.computeControl(x, 1);\n\n    // Compute the feedback gains\n    controller.computeFeedback(x);\n\n    // Propagate the feedback trajectory\n    controller.computeFeedbackPropagatedStateSeq();\n\n    auto nominal_trajectory = controller.getTargetStateSeq();\n    auto nominal_control = controller.getControlSeq();\n    auto fe_stat = controller.getFreeEnergyStatistics();\n\n    // Save everything\n    saveState(x, t, robust_sc_trajectory);\n    saveTraj(nominal_trajectory, t, robust_sc_nominal_traj);\n    robust_sc_nominal_free_energy[t] = fe_stat.nominal_sys.freeEnergyMean;\n    robust_sc_real_free_energy[t] = fe_stat.real_sys.freeEnergyMean;\n    robust_sc_nominal_free_energy_bound[t] =\n        value_function_threshold + 2 * fe_stat.nominal_sys.freeEnergyModifiedVariance;\n    robust_sc_real_free_energy_bound[t] = 0;\n    robust_sc_real_free_energy_growth_bound[t] = 0;\n    robust_sc_nominal_state_used[t] = fe_stat.nominal_state_used;\n\n    // Get the open loop control\n    DoubleIntegratorDynamics::control_array current_control = nominal_control.col(0);\n\n    // Apply the feedback given the current state\n    Dyn::control_array fb_control = controller.getFeedbackControl(x, nominal_trajectory.col(0), 0);\n    current_control += fb_control;\n\n    // Propagate the state forward\n    model.computeDynamics(x, current_control, xdot);\n    model.updateState(x, xdot, dt);\n\n    // Add disturbance\n    x += noise.col(t) * sqrt(model.getParams().system_noise) * dt;\n\n    // Slide the control sequence\n    controller.slideControlSequence(1);\n  }\n  /************* Save CNPY *********************/\n  cnpy::npy_save(\"robust_sc_state_trajectory.npy\", robust_sc_trajectory.data(),\n          
       { total_time_horizon, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"robust_sc_nominal_trajectory.npy\", robust_sc_nominal_traj.data(),\n                 { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"robust_sc_nominal_free_energy.npy\", robust_sc_nominal_free_energy.data(), { total_time_horizon },\n                 \"w\");\n  cnpy::npy_save(\"robust_sc_real_free_energy.npy\", robust_sc_real_free_energy.data(), { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"robust_sc_nominal_state_used.npy\", robust_sc_nominal_state_used.data(), { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"robust_sc_real_free_energy_bound.npy\", robust_sc_real_free_energy_bound.data(),\n                 { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"robust_sc_nominal_free_energy_bound.npy\", robust_sc_nominal_free_energy_bound.data(),\n                 { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"robust_sc_real_free_energy_growth_bound.npy\", robust_sc_real_free_energy_growth_bound.data(),\n                 { total_time_horizon }, \"w\");\n}\n\nvoid runRobustRc(const Eigen::Ref<const Eigen::Matrix<float, Dyn::STATE_DIM, total_time_horizon>>& noise)\n{\n  // Set the initial state\n  DoubleIntegratorDynamics::state_array x;\n  x << 2, 0, 0, 1;\n  DoubleIntegratorDynamics::state_array xdot;\n\n  // control variance\n  Sampler::SAMPLING_PARAMS_T sampler_params;\n  for (int i = 0; i < Dyn::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = 1;\n  }\n\n  // Save actual trajectories, nominal_trajectory, free energy\n  std::vector<float> robust_rc_trajectory(Dyn::STATE_DIM * total_time_horizon, 0);\n  std::vector<float> robust_rc_nominal_traj(Dyn::STATE_DIM * num_timesteps * total_time_horizon, 0);\n  std::vector<float> robust_rc_nominal_free_energy(total_time_horizon, 0);\n  std::vector<float> robust_rc_real_free_energy(total_time_horizon, 0);\n  std::vector<float> 
robust_rc_nominal_free_energy_bound(total_time_horizon, 0);\n  std::vector<float> robust_rc_real_free_energy_bound(total_time_horizon, 0);\n  std::vector<float> robust_rc_real_free_energy_growth_bound(total_time_horizon, 0);\n  std::vector<float> robust_rc_nominal_free_energy_growth(total_time_horizon, 0);\n  std::vector<float> robust_rc_real_free_energy_growth(total_time_horizon, 0);\n  std::vector<float> robust_rc_nominal_state_used(total_time_horizon, 0);\n\n  // Initialize the controllers\n  Dyn model(100);\n  RCost cost;\n  auto params = cost.getParams();\n  params.crash_cost = 100;\n  cost.setParams(params);\n  Sampler sampler(sampler_params);\n\n  // DDP cost parameters\n  Feedback fb_controller(&model, dt);\n  auto fb_params = fb_controller.getParams();\n  fb_params.Q.diagonal() << 500, 500, 100, 100;\n  fb_controller.setParams(fb_params);\n\n  // Value function threshold\n  float value_function_threshold = 20.0;\n  auto controller = RobustMPPIController<Dyn, RCost, Feedback, num_timesteps, 1024, Sampler>(\n      &model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha, value_function_threshold);\n  auto controller_params = controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 1, 1);\n  controller_params.cost_rollout_dim_ = dim3(64, 1, 1);\n  controller.setParams(controller_params);\n\n  // Start the loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    /********************** Robust Robust Cost **********************/\n    // Compute the control\n    controller.updateImportanceSamplingControl(x, 1);\n    controller.computeControl(x, 1);\n\n    // Compute the feedback gains\n    controller.computeFeedback(x);\n\n    // Propagate the feedback trajectory\n    controller.computeFeedbackPropagatedStateSeq();\n\n    auto nominal_trajectory = controller.getTargetStateSeq();\n    auto nominal_control = controller.getControlSeq();\n    auto fe_stat = controller.getFreeEnergyStatistics();\n\n    // Save everything\n    
saveState(x, t, robust_rc_trajectory);\n    saveTraj(nominal_trajectory, t, robust_rc_nominal_traj);\n    robust_rc_nominal_free_energy[t] = fe_stat.nominal_sys.freeEnergyMean;\n    robust_rc_real_free_energy[t] = fe_stat.real_sys.freeEnergyMean;\n    robust_rc_nominal_free_energy_bound[t] =\n        value_function_threshold + 2 * fe_stat.nominal_sys.freeEnergyModifiedVariance;\n    robust_rc_real_free_energy_bound[t] = fe_stat.nominal_sys.freeEnergyMean +\n                                          cost.getLipshitzConstantCost() * 1 * (x - nominal_trajectory.col(0)).norm();\n    robust_rc_real_free_energy_growth_bound[t] = (value_function_threshold - fe_stat.nominal_sys.freeEnergyMean) +\n                                                 cost.getLipshitzConstantCost() * 8 * 20 * controller.computeDF() +\n                                                 2 * fe_stat.nominal_sys.freeEnergyModifiedVariance;\n    robust_rc_nominal_free_energy_growth[t] = fe_stat.nominal_sys.increase;\n    robust_rc_real_free_energy_growth[t] = fe_stat.real_sys.increase;\n    robust_rc_nominal_state_used[t] = fe_stat.nominal_state_used;\n\n    // Get the open loop control\n    DoubleIntegratorDynamics::control_array current_control = nominal_control.col(0);\n\n    // Apply the feedback given the current state\n    Dyn::control_array fb_control = controller.getFeedbackControl(x, nominal_trajectory.col(0), 0);\n    current_control += fb_control;\n\n    // Propagate the state forward\n    model.computeDynamics(x, current_control, xdot);\n    model.updateState(x, xdot, dt);\n\n    // Add disturbance\n    x += noise.col(t) * sqrt(model.getParams().system_noise) * dt;\n\n    // Slide the control sequence\n    controller.slideControlSequence(1);\n  }\n  /************* Save CNPY *********************/\n  cnpy::npy_save(\"robust_rc_state_trajectory.npy\", robust_rc_trajectory.data(),\n                 { total_time_horizon, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  
cnpy::npy_save(\"robust_rc_nominal_trajectory.npy\", robust_rc_nominal_traj.data(),\n                 { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"robust_rc_nominal_free_energy.npy\", robust_rc_nominal_free_energy.data(), { total_time_horizon },\n                 \"w\");\n  cnpy::npy_save(\"robust_rc_real_free_energy.npy\", robust_rc_real_free_energy.data(), { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"robust_rc_nominal_state_used.npy\", robust_rc_nominal_state_used.data(), { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"robust_rc_real_free_energy_bound.npy\", robust_rc_real_free_energy_bound.data(),\n                 { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"robust_rc_nominal_free_energy_bound.npy\", robust_rc_nominal_free_energy_bound.data(),\n                 { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"robust_rc_real_free_energy_growth_bound.npy\", robust_rc_real_free_energy_growth_bound.data(),\n                 { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"robust_rc_real_free_energy_growth.npy\", robust_rc_real_free_energy_growth.data(),\n                 { total_time_horizon }, \"w\");\n  cnpy::npy_save(\"robust_rc_nominal_free_energy_growth.npy\", robust_rc_nominal_free_energy_growth.data(),\n                 { total_time_horizon }, \"w\");\n}\n\nint main()\n{\n  // Run the double integrator example on all the controllers with the SAME noise 20 times.\n\n  // Create a random number generator\n  // Random number generator for system noise\n  std::mt19937 gen;  // Standard mersenne_twister_engine which will be seeded\n  std::normal_distribution<float> normal_distribution;\n  gen.seed(7);  // Seed the 7, so everyone gets the same noise\n  normal_distribution = std::normal_distribution<float>(0, 1);\n\n  Eigen::Matrix<float, Dyn::STATE_DIM, total_time_horizon> universal_noise;\n  universal_noise.setZero();\n\n  // Create the noise for all systems\n  for (int t = 0; t < 
total_time_horizon; ++t)\n  {\n    for (int i = 2; i < 4; ++i)\n    {\n      universal_noise(i, t) = normal_distribution(gen);\n    }\n  }\n\n  runVanilla(universal_noise);\n  std::cout << \"Finished Vanilla\" << std::endl;\n\n  runVanillaLarge(universal_noise);\n  std::cout << \"Finished Vanilla Large\" << std::endl;\n\n  runVanillaLargeRC(universal_noise);\n  std::cout << \"Finished Vanilla Large with Robust Cost\" << std::endl;\n\n  runTube(universal_noise);\n  std::cout << \"Finished Tube with Standard Cost\" << std::endl;\n\n  runTubeRC(universal_noise);\n  std::cout << \"Finished Tube with Robust Cost\" << std::endl;\n\n  runRobustSc(universal_noise);\n  std::cout << \"Finished RMPPI with Standard Cost\" << std::endl;\n\n  runRobustRc(universal_noise);\n  std::cout << \"Finished RMPPI with Robust Cost\" << std::endl;\n\n  return 0;\n}\n"
  },
  {
    "path": "examples/double_integrator_example.cu",
    "content": "#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n#include <mppi/cost_functions/quadratic_cost/quadratic_cost.cuh>\n#include <mppi/controllers/MPPI/mppi_controller.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n\n#include <mppi/sampling_distributions/colored_noise/colored_noise.cuh>\n\n#include <iomanip>\n// #include <sstream>\n\n#define USE_COLORED_NOISE\n\nconst int TIMESTEPS = 65;\nconst int NUM_ROLLOUTS = 128;\n\nusing DYN = DoubleIntegratorDynamics;\nusing COST = QuadraticCost<DYN>;\nusing FB_CONTROLLER = DDPFeedback<DYN, TIMESTEPS>;\n\n#ifdef USE_COLORED_NOISE\nusing SAMPLER = mppi::sampling_distributions::ColoredNoiseDistribution<DYN::DYN_PARAMS_T>;\n#else\nusing SAMPLER = mppi::sampling_distributions::GaussianDistribution<DYN::DYN_PARAMS_T>;\n#endif\n\nint main()\n{\n  float dt = 0.015;\n\n  // Set the initial state\n  DYN::state_array x;\n  x << -9, -9, 0.1, 0.1;\n  DYN::state_array xdot;\n\n  SAMPLER::SAMPLING_PARAMS_T sampler_params;\n  for (int i = 0; i < DYN::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = 0.5;\n#ifdef USE_COLORED_NOISE\n    sampler_params.exponents[i] = 1.0;\n#endif\n  }\n  SAMPLER sampler(sampler_params);\n\n  // Create the dynamics, cost function, and feedback controller\n  DYN model;\n  COST cost;\n  FB_CONTROLLER fb_controller = FB_CONTROLLER(&model, dt);\n  auto cost_params = cost.getParams();\n\n  // Set up cost function\n  DYN::state_array x_goal;\n  x_goal << -4, -4, 0, 0;\n  DYN::state_array q_coeffs;\n  q_coeffs << 5, 5, 0.5, 0.5;\n  for (int i = 0; i < DYN::STATE_DIM; i++)\n  {\n    cost_params.s_coeffs[i] = q_coeffs[i];\n    cost_params.s_goal[i] = x_goal[i];\n  }\n  cost.setParams(cost_params);\n\n  // Create MPPI Controller\n  float lambda = 1;\n  float alpha = 1.0;\n  int max_iter = 1;\n  int total_time_horizon = 300;\n\n  auto controller = VanillaMPPIController<DYN, COST, FB_CONTROLLER, TIMESTEPS, NUM_ROLLOUTS, SAMPLER>(\n      &model, &cost, &fb_controller, &sampler, dt, 
max_iter, lambda, alpha);\n\n  auto controller_params = controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 1, 1);\n  controller_params.cost_rollout_dim_ = dim3(64, 1, 1);\n  controller.setParams(controller_params);\n\n  /********************** Vanilla MPPI **********************/\n  float cumulative_cost = 0;\n  int crash = 0;\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    // Compute the control\n    controller.computeControl(x, 1);\n\n    auto nominal_control = controller.getControlSeq();\n    auto fe_stat = controller.getFreeEnergyStatistics();\n\n    DYN::control_array current_control = nominal_control.col(0);\n    // Propagate dynamics forward\n    DYN::output_array y;\n    DYN::state_array x_next;\n    // model.computeDynamics(x, current_control, xdot);\n    // model.updateState(x, xdot, dt);\n    model.step(x, x_next, xdot, current_control, y, t, dt);\n    x = x_next;\n    if (t % 10 == 0)\n    {\n      std::cout << \"T: \" << std::fixed << std::setprecision(3) << t * dt;\n      // << \"s Free Energy: \" << fe_stat.real_sys.freeEnergyMean\n      // << \" +- \" << fe_stat.real_sys.freeEnergyVariance << std::endl;\n      std::cout << \" X: \" << x.transpose() << std::endl;\n    }\n\n    // Slide the control sequence\n    controller.slideControlSequence(1);\n    cumulative_cost += cost.computeRunningCost(y, current_control, t, &crash);\n  }\n  std::cout << \"Total Cost: \" << cumulative_cost << std::endl;\n\n  return 0;\n}\n"
  },
  {
    "path": "include/mppi/controllers/ColoredMPPI/colored_mppi_controller.cu",
    "content": "#include <mppi/controllers/ColoredMPPI/colored_mppi_controller.cuh>\n#include <mppi/core/mppi_common.cuh>\n#include <algorithm>\n#include <iostream>\n#include <mppi/sampling_distributions/colored_noise/colored_noise.cuh>\n\n#define ColoredMPPI_TEMPLATE                                                                                           \\\n  template <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, class SAMPLING_T,              \\\n            class PARAMS_T>\n#define ColoredMPPI ColoredMPPIController<DYN_T, COST_T, FB_T, MAX_TIMESTEPS, NUM_ROLLOUTS, SAMPLING_T, PARAMS_T>\n\nColoredMPPI_TEMPLATE ColoredMPPI::ColoredMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller,\n                                                        SAMPLING_T* sampler, float dt, int max_iter, float lambda,\n                                                        float alpha, int num_timesteps,\n                                                        const Eigen::Ref<const control_trajectory>& init_control_traj,\n                                                        cudaStream_t stream)\n  : PARENT_CLASS(model, cost, fb_controller, sampler, dt, max_iter, lambda, alpha, num_timesteps, init_control_traj,\n                 stream)\n{\n  // Allocate CUDA memory for the controller\n  allocateCUDAMemory();\n\n  // Copy the noise std_dev to the device\n  // this->copyControlStdDevToDevice();\n  chooseAppropriateKernel();\n}\n\nColoredMPPI_TEMPLATE ColoredMPPI::ColoredMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller,\n                                                        SAMPLING_T* sampler, PARAMS_T& params, cudaStream_t stream)\n  : PARENT_CLASS(model, cost, fb_controller, sampler, params, stream)\n{\n  // Allocate CUDA memory for the controller\n  allocateCUDAMemory();\n\n  // // Copy the noise std_dev to the device\n  // this->copyControlStdDevToDevice();\n  chooseAppropriateKernel();\n}\n\nColoredMPPI_TEMPLATE void 
ColoredMPPI::chooseAppropriateKernel()\n{\n  cudaDeviceProp deviceProp;\n  HANDLE_ERROR(cudaGetDeviceProperties(&deviceProp, 0));\n  unsigned single_kernel_byte_size = mppi::kernels::calcRolloutCombinedKernelSharedMemSize(\n      this->model_, this->cost_, this->sampler_, this->params_.dynamics_rollout_dim_);\n  unsigned split_dyn_kernel_byte_size = mppi::kernels::calcRolloutDynamicsKernelSharedMemSize(\n      this->model_, this->sampler_, this->params_.dynamics_rollout_dim_);\n  unsigned split_cost_kernel_byte_size =\n      mppi::kernels::calcRolloutCostKernelSharedMemSize(this->cost_, this->sampler_, this->params_.cost_rollout_dim_);\n  unsigned vis_single_kernel_byte_size = mppi::kernels::calcVisualizeKernelSharedMemSize(\n      this->model_, this->cost_, this->sampler_, this->getNumTimesteps(), this->params_.visualize_dim_);\n\n  bool too_much_mem_single_kernel = single_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool too_much_mem_vis_kernel = vis_single_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool too_much_mem_split_kernel = split_dyn_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  too_much_mem_split_kernel = too_much_mem_split_kernel || split_cost_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  too_much_mem_single_kernel = too_much_mem_single_kernel || too_much_mem_vis_kernel;\n\n  if (too_much_mem_split_kernel && too_much_mem_single_kernel)\n  {\n    std::string error_msg =\n        \"There is not enough shared memory on the GPU for either rollout kernel option. 
The combined rollout kernel \"\n        \"takes \" +\n        std::to_string(single_kernel_byte_size) + \" bytes, the cost rollout kernel takes \" +\n        std::to_string(split_cost_kernel_byte_size) + \" bytes, the dynamics rollout kernel takes \" +\n        std::to_string(split_dyn_kernel_byte_size) + \" bytes, the combined visualization kernel takes \" +\n        std::to_string(vis_single_kernel_byte_size) + \" bytes, and the max is \" +\n        std::to_string(deviceProp.sharedMemPerBlock) +\n        \" bytes. Considering lowering the corresponding thread block sizes.\";\n    throw std::runtime_error(error_msg);\n  }\n  else if (too_much_mem_single_kernel)\n  {\n    this->setKernelChoice(kernelType::USE_SPLIT_KERNELS);\n    return;\n  }\n  else if (too_much_mem_split_kernel)\n  {\n    this->setKernelChoice(kernelType::USE_SINGLE_KERNEL);\n    return;\n  }\n\n  // Send the nominal control to the device\n  this->copyNominalControlToDevice(false);\n  state_array zero_state = this->model_->getZeroState();\n  // Send zero state to the device\n  HANDLE_ERROR(cudaMemcpyAsync(this->initial_state_d_, zero_state.data(), DYN_T::STATE_DIM * sizeof(float),\n                               cudaMemcpyHostToDevice, this->stream_));\n  // Generate noise data\n  this->sampler_->generateSamples(1, 0, this->gen_, true);\n\n  float single_kernel_time_ms = std::numeric_limits<float>::infinity();\n  float split_kernel_time_ms = std::numeric_limits<float>::infinity();\n\n  // Evaluate each kernel that is applicable\n  auto start_single_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < this->getNumKernelEvaluations() && !too_much_mem_single_kernel; i++)\n  {\n    mppi::kernels::launchRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n        this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n        this->getLambda(), this->getAlpha(), this->initial_state_d_, this->trajectory_costs_d_,\n        
this->params_.dynamics_rollout_dim_, this->stream_, true);\n  }\n  auto end_single_kernel_time = std::chrono::steady_clock::now();\n  auto start_split_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < this->getNumKernelEvaluations() && !too_much_mem_split_kernel; i++)\n  {\n    mppi::kernels::launchSplitRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n        this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n        this->getLambda(), this->getAlpha(), this->initial_state_d_, this->output_d_, this->trajectory_costs_d_,\n        this->params_.dynamics_rollout_dim_, this->params_.cost_rollout_dim_, this->stream_, true);\n  }\n  auto end_split_kernel_time = std::chrono::steady_clock::now();\n\n  // calc times\n  if (!too_much_mem_single_kernel)\n  {\n    single_kernel_time_ms = mppi::math::timeDiffms(end_single_kernel_time, start_single_kernel_time);\n  }\n  if (!too_much_mem_split_kernel)\n  {\n    split_kernel_time_ms = mppi::math::timeDiffms(end_split_kernel_time, start_split_kernel_time);\n  }\n  std::string kernel_choice = \"\";\n  if (split_kernel_time_ms < single_kernel_time_ms)\n  {\n    this->setKernelChoice(kernelType::USE_SPLIT_KERNELS);\n    kernel_choice = \"split \";\n  }\n  else\n  {\n    this->setKernelChoice(kernelType::USE_SINGLE_KERNEL);\n    kernel_choice = \"single\";\n  }\n  this->logger_->info(\"Choosing %s kernel based on split taking %f ms and single taking %f ms after %d iterations\\n\",\n                     kernel_choice.c_str(), split_kernel_time_ms, single_kernel_time_ms,\n                     this->getNumKernelEvaluations());\n}\n\nColoredMPPI_TEMPLATE ColoredMPPI::~ColoredMPPIController()\n{\n  // all implemented in standard controller\n}\n\nColoredMPPI_TEMPLATE void ColoredMPPI::computeControl(const Eigen::Ref<const state_array>& state,\n                                                      int optimization_stride)\n{\n  
this->free_energy_statistics_.real_sys.previousBaseline = this->getBaselineCost();\n  state_array local_state = state;\n\n  if (getLeashActive())\n  {\n    this->model_->enforceLeash(state, this->state_.col(leash_jump_), this->params_.state_leash_dist_, local_state);\n  }\n\n  // Send the initial condition to the device\n  HANDLE_ERROR(cudaMemcpyAsync(this->initial_state_d_, local_state.data(), DYN_T::STATE_DIM * sizeof(float),\n                               cudaMemcpyHostToDevice, this->stream_));\n\n  float baseline_prev = 1e8;\n  for (int opt_iter = 0; opt_iter < this->getNumIters(); opt_iter++)\n  {\n    // Send the nominal control to the device\n    this->copyNominalControlToDevice(false);\n\n    // Generate noise data\n    this->sampler_->generateSamples(optimization_stride, opt_iter, this->gen_, false);\n    // Launch the rollout kernel\n    if (this->getKernelChoiceAsEnum() == kernelType::USE_SPLIT_KERNELS)\n    {\n      mppi::kernels::launchSplitRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n          this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n          this->getLambda(), this->getAlpha(), this->initial_state_d_, this->output_d_, this->trajectory_costs_d_,\n          this->params_.dynamics_rollout_dim_, this->params_.cost_rollout_dim_, this->stream_, false);\n    }\n    else if (this->getKernelChoiceAsEnum() == kernelType::USE_SINGLE_KERNEL)\n    {\n      mppi::kernels::launchRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n          this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n          this->getLambda(), this->getAlpha(), this->initial_state_d_, this->trajectory_costs_d_,\n          this->params_.dynamics_rollout_dim_, this->stream_, false);\n    }\n\n    // Copy the costs back to the host\n    HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), this->trajectory_costs_d_,\n                                 NUM_ROLLOUTS * sizeof(float), 
cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n    this->setBaseline(mppi::kernels::computeBaselineCost(this->trajectory_costs_.data(), NUM_ROLLOUTS));\n\n    if (this->getBaselineCost() > baseline_prev + 1)\n    {\n      this->logger_->debug(\"Previous Baseline: %f\\n         Baseline: %f\\n\", baseline_prev, this->getBaselineCost());\n    }\n\n    baseline_prev = this->getBaselineCost();\n\n    // Launch the norm exponential kernel\n    if (getGamma() == 0 || getRExp() == 0)\n    {\n      mppi::kernels::launchNormExpKernel(NUM_ROLLOUTS, this->getNormExpThreads(), this->trajectory_costs_d_,\n                                         1.0 / this->getLambda(), this->getBaselineCost(), this->stream_, false);\n    }\n    else\n    {\n      mppi::kernels::launchTsallisKernel(NUM_ROLLOUTS, this->getNormExpThreads(), this->trajectory_costs_d_, getGamma(),\n                                         getRExp(), this->getBaselineCost(), this->stream_, false);\n    }\n    HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), this->trajectory_costs_d_,\n                                 NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n    // Compute the normalizer\n    this->setNormalizer(mppi::kernels::computeNormalizer(this->trajectory_costs_.data(), NUM_ROLLOUTS));\n\n    mppi::kernels::computeFreeEnergy(this->free_energy_statistics_.real_sys.freeEnergyMean,\n                                     this->free_energy_statistics_.real_sys.freeEnergyVariance,\n                                     this->free_energy_statistics_.real_sys.freeEnergyModifiedVariance,\n                                     this->trajectory_costs_.data(), NUM_ROLLOUTS, this->getBaselineCost(),\n                                     this->getLambda());\n\n    // Compute the cost weighted average //TODO SUM_STRIDE is BDIM_X, but should it be its own parameter?\n    
this->sampler_->updateDistributionParamsFromDevice(this->trajectory_costs_d_, this->getNormalizerCost(), 0, false);\n\n    // Transfer the new control to the host\n    this->sampler_->setHostOptimalControlSequence(this->control_.data(), 0, true);\n  }\n\n  this->free_energy_statistics_.real_sys.normalizerPercent = this->getNormalizerCost() / NUM_ROLLOUTS;\n  this->free_energy_statistics_.real_sys.increase =\n      this->getBaselineCost() - this->free_energy_statistics_.real_sys.previousBaseline;\n  smoothControlTrajectory();\n  computeStateTrajectory(local_state);\n  state_array zero_state = this->model_->getZeroState();\n  for (int i = 0; i < this->getNumTimesteps(); i++)\n  {\n    // this->model_->enforceConstraints(zero_state, this->control_.col(i));\n    this->control_.col(i)[1] =\n        fminf(fmaxf(this->control_.col(i)[1], this->model_->control_rngs_[1].x), this->model_->control_rngs_[1].y);\n  }\n\n  // Copy back sampled trajectories\n  this->copySampledControlFromDevice(false);\n  if (this->getKernelChoiceAsEnum() == kernelType::USE_SINGLE_KERNEL)\n  {  // copy initial state to vis initial state for use with visualizeKernel\n    HANDLE_ERROR(cudaMemcpyAsync(this->vis_initial_state_d_, this->initial_state_d_, sizeof(float) * DYN_T::STATE_DIM,\n                                 cudaMemcpyDeviceToDevice, this->vis_stream_));\n  }\n  this->copyTopControlFromDevice(true);\n}\n\nColoredMPPI_TEMPLATE void ColoredMPPI::allocateCUDAMemory()\n{\n  PARENT_CLASS::allocateCUDAMemoryHelper();\n}\n\nColoredMPPI_TEMPLATE void ColoredMPPI::computeStateTrajectory(const Eigen::Ref<const state_array>& x0)\n{\n  this->computeOutputTrajectoryHelper(this->output_, this->state_, x0, this->control_);\n}\n\nColoredMPPI_TEMPLATE void ColoredMPPI::slideControlSequence(int steps)\n{\n  // TODO does the logic of handling control history reasonable?\n  leash_jump_ = steps;\n  // Save the control history\n  this->saveControlHistoryHelper(steps, this->control_, 
this->control_history_);\n\n  this->slideControlSequenceHelper(steps, this->control_);\n}\n\nColoredMPPI_TEMPLATE void ColoredMPPI::smoothControlTrajectory()\n{\n  this->smoothControlTrajectoryHelper(this->control_, this->control_history_);\n}\n\nColoredMPPI_TEMPLATE void ColoredMPPI::calculateSampledStateTrajectories()\n{\n  int num_sampled_trajectories = this->getTotalSampledTrajectories();\n  // controls already copied in compute control\n  if (this->getKernelChoiceAsEnum() == kernelType::USE_SPLIT_KERNELS)\n  {\n    mppi::kernels::launchVisualizeCostKernel<COST_T, SAMPLING_T>(\n        this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), num_sampled_trajectories,\n        this->getLambda(), this->getAlpha(), this->sampled_outputs_d_, this->sampled_crash_status_d_,\n        this->sampled_costs_d_, this->params_.cost_rollout_dim_, this->stream_, false);\n  }\n  else if (this->getKernelChoiceAsEnum() == kernelType::USE_SINGLE_KERNEL)\n  {\n    mppi::kernels::launchVisualizeKernel<DYN_T, COST_T, SAMPLING_T>(\n        this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), num_sampled_trajectories,\n        this->getLambda(), this->getAlpha(), this->vis_initial_state_d_, this->sampled_outputs_d_,\n        this->sampled_costs_d_, this->sampled_crash_status_d_, this->params_.visualize_dim_, this->stream_, false);\n  }\n  // #if true\n  //   mppi::kernels::launchVisualizeCostKernel<COST_T, SAMPLING_T>(\n  //       this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(),\n  //       num_sampled_trajectories, this->getLambda(), this->getAlpha(), this->sampled_outputs_d_,\n  //       this->sampled_crash_status_d_, this->sampled_costs_d_, this->params_.cost_rollout_dim_, this->vis_stream_,\n  //       false);\n  // #else\n  //   mppi_common::launchVisualizeCostKernel<COST_T, 128, COST_B_Y, 1>(\n  //       this->cost_, this->getDt(), this->getNumTimesteps(), num_sampled_trajectories, this->getLambda(),\n  //       
this->getAlpha(), this->sampled_outputs_d_, this->sampled_noise_d_, this->sampled_crash_status_d_,\n  //       this->control_std_dev_d_, this->sampled_costs_d_, this->vis_stream_, false);\n  // #endif\n  for (int i = 0; i < num_sampled_trajectories; i++)\n  {\n    // set initial state to the first location\n    // shifted by one since we do not save the initial state\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_trajectories_[i].data(),\n                                 this->sampled_outputs_d_ + i * this->getNumTimesteps() * DYN_T::OUTPUT_DIM,\n                                 (this->getNumTimesteps() - 1) * DYN_T::OUTPUT_DIM * sizeof(float),\n                                 cudaMemcpyDeviceToHost, this->vis_stream_));\n    HANDLE_ERROR(\n        cudaMemcpyAsync(this->sampled_costs_[i].data(), this->sampled_costs_d_ + (i * (this->getNumTimesteps() + 1)),\n                        (this->getNumTimesteps() + 1) * sizeof(float), cudaMemcpyDeviceToHost, this->vis_stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_crash_status_[i].data(),\n                                 this->sampled_crash_status_d_ + (i * this->getNumTimesteps()),\n                                 this->getNumTimesteps() * sizeof(float), cudaMemcpyDeviceToHost, this->vis_stream_));\n  }\n  HANDLE_ERROR(cudaStreamSynchronize(this->vis_stream_));\n}\n\n#undef ColoredMPPI\n"
  },
  {
    "path": "include/mppi/controllers/ColoredMPPI/colored_mppi_controller.cuh",
    "content": "/**\n * Created by jason on 10/30/19.\n * Creates the API for interfacing with an MPPI controller\n * should define a compute_control based on state as well\n * as return timing info\n **/\n\n#ifndef MPPIGENERIC_COLORED_MPPI_CONTROLLER_CUH\n#define MPPIGENERIC_COLORED_MPPI_CONTROLLER_CUH\n\n#include <mppi/controllers/controller.cuh>\n#include <mppi/sampling_distributions/colored_noise/colored_noise.cuh>\n\n#include <vector>\n\ntemplate <int S_DIM, int C_DIM, int MAX_TIMESTEPS>\nstruct ColoredMPPIParams : public ControllerParams<S_DIM, C_DIM, MAX_TIMESTEPS>\n{\n  float r = 2.0;\n  float gamma = 0;\n  Eigen::Matrix<float, S_DIM, 1> state_leash_dist_ = Eigen::Matrix<float, S_DIM, 1>::Zero();\n\n  ColoredMPPIParams() = default;\n  ColoredMPPIParams(const ColoredMPPIParams<S_DIM, C_DIM, MAX_TIMESTEPS>& other)\n  {\n    typedef ControllerParams<S_DIM, C_DIM, MAX_TIMESTEPS> BASE;\n    const BASE& other_item_ref = other;\n    *(static_cast<BASE*>(this)) = other_item_ref;\n    // this->colored_noise_exponents_ = other.colored_noise_exponents_;\n  }\n\n  ColoredMPPIParams(ColoredMPPIParams<S_DIM, C_DIM, MAX_TIMESTEPS>& other)\n  {\n    typedef ControllerParams<S_DIM, C_DIM, MAX_TIMESTEPS> BASE;\n    BASE& other_item_ref = other;\n    *(static_cast<BASE*>(this)) = other_item_ref;\n    // this->colored_noise_exponents_ = other.colored_noise_exponents_;\n  }\n};\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS,\n          class SAMPLING_T = ::mppi::sampling_distributions::ColoredNoiseDistribution<typename DYN_T::DYN_PARAMS_T>,\n          class PARAMS_T = ColoredMPPIParams<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM, MAX_TIMESTEPS>>\nclass ColoredMPPIController : public Controller<DYN_T, COST_T, FB_T, SAMPLING_T, MAX_TIMESTEPS, NUM_ROLLOUTS, PARAMS_T>\n{\npublic:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  typedef Controller<DYN_T, COST_T, FB_T, SAMPLING_T, MAX_TIMESTEPS, NUM_ROLLOUTS, PARAMS_T> PARENT_CLASS;\n  // need control_array = 
... so that we can initialize\n  // Eigen::Matrix with Eigen::Matrix::Zero();\n  using control_array = typename PARENT_CLASS::control_array;\n  using control_trajectory = typename PARENT_CLASS::control_trajectory;\n  using state_trajectory = typename PARENT_CLASS::state_trajectory;\n  using state_array = typename PARENT_CLASS::state_array;\n  using sampled_cost_traj = typename PARENT_CLASS::sampled_cost_traj;\n  using FEEDBACK_GPU = typename PARENT_CLASS::TEMPLATED_FEEDBACK_GPU;\n\n  /**\n   *\n   * Public member functions\n   */\n  // Constructor\n  ColoredMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, float dt, int max_iter,\n                        float lambda, float alpha, int num_timesteps = MAX_TIMESTEPS,\n                        const Eigen::Ref<const control_trajectory>& init_control_traj = control_trajectory::Zero(),\n                        cudaStream_t stream = nullptr);\n\n  ColoredMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, PARAMS_T& params,\n                        cudaStream_t stream = nullptr);\n\n  // Destructor\n  ~ColoredMPPIController();\n\n  std::string getControllerName() const override\n  {\n    return \"Colored MPPI\";\n  };\n\n  /**\n   * computes a new control sequence\n   * @param state starting position\n   */\n  void computeControl(const Eigen::Ref<const state_array>& state, int optimization_stride = 1) override;\n\n  /**\n   * Slide the control sequence back n steps\n   */\n  void slideControlSequence(int optimization_stride) override;\n\n  void setPercentageSampledControlTrajectories(float new_perc)\n  {\n    this->setPercentageSampledControlTrajectoriesHelper(new_perc, 1);\n  }\n\n  void setGamma(float gamma)\n  {\n    this->params_.gamma = gamma;\n  }\n\n  float getGamma()\n  {\n    return this->params_.gamma;\n  }\n\n  void setRExp(float r)\n  {\n    this->params_.r = r;\n  }\n\n  float getRExp()\n  {\n    return this->params_.r;\n  }\n\n  void 
setOffsetDecayRate(float decay_rate)\n  {\n    this->sampler_->setOffsetDecayRate(decay_rate);\n  }\n\n  float getOffsetDecayRate()\n  {\n    return this->sampler_->getOffsetDecayRate();\n  }\n\n  void setColoredNoiseExponents(std::vector<float>& new_exponents)\n  {\n    auto sampler_params = this->sampler_->getParams();\n    for (int i = 0; i < new_exponents.size(); i++)\n    {\n      sampler_params.exponents[i] = new_exponents[i];\n    }\n    this->sampler_->setParams(sampler_params);\n  }\n\n  float getColoredNoiseExponent(int index)\n  {\n    auto sampler_params = this->sampler_->getParams();\n    return sampler_params.exponents[index];\n  }\n\n  std::vector<float> getColoredNoiseExponents()\n  {\n    std::vector<float> exponents;\n    auto sampler_params = this->sampler_->getParams();\n    for (int i = 0; i < DYN_T::CONTROL_DIM; i++)\n    {\n      exponents.push_back(sampler_params.exponents[i]);\n    }\n    return exponents;\n  }\n\n  void setNoiseDecay(float new_noise_decay)\n  {\n    auto sampler_params = this->sampler_->getParams();\n    sampler_params.std_dev_decay = new_noise_decay;\n    this->sampler_->setParams(sampler_params);\n  }\n\n  void setStateLeashLength(float new_state_leash, int index = 0)\n  {\n    this->params_.state_leash_dist_[index] = new_state_leash;\n  }\n\n  float getStateLeashLength(int index)\n  {\n    return this->params_.state_leash_dist_[index];\n  }\n\n  bool getLeashActive()\n  {\n    return leash_active_;\n  }\n\n  void setLeashActive(bool new_leash_active)\n  {\n    leash_active_ = new_leash_active;\n  }\n\n  void calculateSampledStateTrajectories() override;\n\n  void chooseAppropriateKernel() override;\n\nprotected:\n  std::vector<float>& getColoredNoiseExponentsLValue()\n  {\n    return this->params_.colored_noise_exponents_;\n  }\n\n  void computeStateTrajectory(const Eigen::Ref<const state_array>& x0);\n\n  void smoothControlTrajectory();\n  int leash_jump_ = 1;\n  bool leash_active_ = false;\n  float 
control_std_dev_decay_ = 1.0;\n\nprivate:\n  // ======== MUST BE OVERWRITTEN =========\n  void allocateCUDAMemory();\n  // ======== END MUST BE OVERWRITTEN =====\n};\n\n#if __CUDACC__\n#include \"colored_mppi_controller.cu\"\n#endif\n\n#endif  // MPPIGENERIC_COLORED_MPPI_CONTROLLER_CUH\n"
  },
  {
    "path": "include/mppi/controllers/MPPI/mppi_controller.cu",
    "content": "#include <atomic>\n#include <mppi/controllers/MPPI/mppi_controller.cuh>\n#include <mppi/core/mppi_common.cuh>\n#include <algorithm>\n#include <iostream>\n#include <stdexcept>\n#include <string>\n\n#define VANILLA_MPPI_TEMPLATE                                                                                          \\\n  template <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, class SAMPLING_T,              \\\n            class PARAMS_T>\n\n#define VanillaMPPI VanillaMPPIController<DYN_T, COST_T, FB_T, MAX_TIMESTEPS, NUM_ROLLOUTS, SAMPLING_T, PARAMS_T>\n\nVANILLA_MPPI_TEMPLATE\nVanillaMPPI::VanillaMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, float dt,\n                                   int max_iter, float lambda, float alpha, int num_timesteps,\n                                   const Eigen::Ref<const control_trajectory>& init_control_traj, cudaStream_t stream)\n  : PARENT_CLASS(model, cost, fb_controller, sampler, dt, max_iter, lambda, alpha, num_timesteps, init_control_traj,\n                 stream)\n{\n  // Allocate CUDA memory for the controller\n  allocateCUDAMemory();\n\n  // Copy the noise std_dev to the device\n  // this->copyControlStdDevToDevice();\n\n  chooseAppropriateKernel();\n}\n\nVANILLA_MPPI_TEMPLATE\nVanillaMPPI::VanillaMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler,\n                                   PARAMS_T& params, cudaStream_t stream)\n  : PARENT_CLASS(model, cost, fb_controller, sampler, params, stream)\n{\n  // Allocate CUDA memory for the controller\n  allocateCUDAMemory();\n\n  // // Copy the noise std_dev to the device\n  // this->copyControlStdDevToDevice();\n  chooseAppropriateKernel();\n}\n\nVANILLA_MPPI_TEMPLATE\nvoid VanillaMPPI::chooseAppropriateKernel()\n{\n  cudaDeviceProp deviceProp;\n  HANDLE_ERROR(cudaGetDeviceProperties(&deviceProp, 0));\n  unsigned single_kernel_byte_size = 
mppi::kernels::calcRolloutCombinedKernelSharedMemSize(\n      this->model_, this->cost_, this->sampler_, this->params_.dynamics_rollout_dim_);\n  unsigned split_dyn_kernel_byte_size = mppi::kernels::calcRolloutDynamicsKernelSharedMemSize(\n      this->model_, this->sampler_, this->params_.dynamics_rollout_dim_);\n  unsigned split_cost_kernel_byte_size =\n      mppi::kernels::calcRolloutCostKernelSharedMemSize(this->cost_, this->sampler_, this->params_.cost_rollout_dim_);\n  unsigned vis_single_kernel_byte_size = mppi::kernels::calcVisualizeKernelSharedMemSize(\n      this->model_, this->cost_, this->sampler_, this->getNumTimesteps(), this->params_.visualize_dim_);\n\n  bool too_much_mem_single_kernel = single_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool too_much_mem_vis_kernel = vis_single_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool too_much_mem_split_kernel = split_dyn_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  too_much_mem_split_kernel = too_much_mem_split_kernel || split_cost_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  too_much_mem_single_kernel = too_much_mem_single_kernel || too_much_mem_vis_kernel;\n\n  if (too_much_mem_split_kernel && too_much_mem_single_kernel)\n  {\n    std::string error_msg =\n        \"There is not enough shared memory on the GPU for either rollout kernel option. The combined rollout kernel \"\n        \"takes \" +\n        std::to_string(single_kernel_byte_size) + \" bytes, the cost rollout kernel takes \" +\n        std::to_string(split_cost_kernel_byte_size) + \" bytes, the dynamics rollout kernel takes \" +\n        std::to_string(split_dyn_kernel_byte_size) + \" bytes, the combined visualization kernel takes \" +\n        std::to_string(vis_single_kernel_byte_size) + \" bytes, and the max is \" +\n        std::to_string(deviceProp.sharedMemPerBlock) +\n        \" bytes. 
Considering lowering the corresponding thread block sizes.\";\n    throw std::runtime_error(error_msg);\n  }\n  else if (too_much_mem_single_kernel)\n  {\n    this->setKernelChoice(kernelType::USE_SPLIT_KERNELS);\n    return;\n  }\n  else if (too_much_mem_split_kernel)\n  {\n    this->setKernelChoice(kernelType::USE_SINGLE_KERNEL);\n    return;\n  }\n\n  // Send the nominal control to the device\n  this->copyNominalControlToDevice(false);\n  state_array zero_state = this->model_->getZeroState();\n  // Send zero state to the device\n  HANDLE_ERROR(cudaMemcpyAsync(this->initial_state_d_, zero_state.data(), DYN_T::STATE_DIM * sizeof(float),\n                               cudaMemcpyHostToDevice, this->stream_));\n  // Generate noise data\n  this->sampler_->generateSamples(1, 0, this->gen_, true);\n\n  float single_kernel_time_ms = std::numeric_limits<float>::infinity();\n  float split_kernel_time_ms = std::numeric_limits<float>::infinity();\n\n  // Evaluate each kernel that is applicable\n  auto start_single_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < this->getNumKernelEvaluations() && !too_much_mem_single_kernel; i++)\n  {\n    mppi::kernels::launchRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n        this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n        this->getLambda(), this->getAlpha(), this->initial_state_d_, this->trajectory_costs_d_,\n        this->params_.dynamics_rollout_dim_, this->stream_, true);\n  }\n  auto end_single_kernel_time = std::chrono::steady_clock::now();\n  auto start_split_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < this->getNumKernelEvaluations() && !too_much_mem_split_kernel; i++)\n  {\n    mppi::kernels::launchSplitRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n        this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n        this->getLambda(), this->getAlpha(), this->initial_state_d_, 
this->output_d_, this->trajectory_costs_d_,\n        this->params_.dynamics_rollout_dim_, this->params_.cost_rollout_dim_, this->stream_, true);\n  }\n  auto end_split_kernel_time = std::chrono::steady_clock::now();\n\n  // calc times\n  if (!too_much_mem_single_kernel)\n  {\n    single_kernel_time_ms = mppi::math::timeDiffms(end_single_kernel_time, start_single_kernel_time);\n  }\n  if (!too_much_mem_split_kernel)\n  {\n    split_kernel_time_ms = mppi::math::timeDiffms(end_split_kernel_time, start_split_kernel_time);\n  }\n  std::string kernel_choice = \"\";\n  if (split_kernel_time_ms < single_kernel_time_ms)\n  {\n    this->setKernelChoice(kernelType::USE_SPLIT_KERNELS);\n    kernel_choice = \"split \";\n  }\n  else\n  {\n    this->setKernelChoice(kernelType::USE_SINGLE_KERNEL);\n    kernel_choice = \"single\";\n  }\n  this->logger_->info(\"Choosing %s kernel based on split taking %f ms and single taking %f ms after %d iterations\\n\",\n                     kernel_choice.c_str(), split_kernel_time_ms, single_kernel_time_ms,\n                     this->getNumKernelEvaluations());\n}\n\nVANILLA_MPPI_TEMPLATE\nVanillaMPPI::~VanillaMPPIController()\n{\n  // all implemented in standard controller\n}\n\nVANILLA_MPPI_TEMPLATE\nvoid VanillaMPPI::computeControl(const Eigen::Ref<const state_array>& state, int optimization_stride)\n{\n  this->free_energy_statistics_.real_sys.previousBaseline = this->getBaselineCost();\n\n  // Send the initial condition to the device\n  HANDLE_ERROR(cudaMemcpyAsync(this->initial_state_d_, state.data(), DYN_T::STATE_DIM * sizeof(float),\n                               cudaMemcpyHostToDevice, this->stream_));\n\n  float baseline_prev = 1e8;\n\n  for (int opt_iter = 0; opt_iter < this->getNumIters(); opt_iter++)\n  {\n    // Send the nominal control to the device\n    this->copyNominalControlToDevice(false);\n\n    // Generate noise data\n    this->sampler_->generateSamples(optimization_stride, opt_iter, this->gen_, false);\n\n    // Launch 
the rollout kernel\n    if (this->getKernelChoiceAsEnum() == kernelType::USE_SPLIT_KERNELS)\n    {\n      mppi::kernels::launchSplitRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n          this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n          this->getLambda(), this->getAlpha(), this->initial_state_d_, this->output_d_, this->trajectory_costs_d_,\n          this->params_.dynamics_rollout_dim_, this->params_.cost_rollout_dim_, this->stream_, false);\n    }\n    else if (this->getKernelChoiceAsEnum() == kernelType::USE_SINGLE_KERNEL)\n    {\n      mppi::kernels::launchRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n          this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n          this->getLambda(), this->getAlpha(), this->initial_state_d_, this->trajectory_costs_d_,\n          this->params_.dynamics_rollout_dim_, this->stream_, false);\n    }\n\n    // Copy the costs back to the host\n    HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), this->trajectory_costs_d_,\n                                 NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n    this->setBaseline(mppi::kernels::computeBaselineCost(this->trajectory_costs_.data(), NUM_ROLLOUTS));\n\n    if (this->getBaselineCost() > baseline_prev + 1)\n    {\n      this->logger_->debug(\"Previous Baseline: %f\\n         Baseline: %f\\n\", baseline_prev, this->getBaselineCost());\n    }\n\n    baseline_prev = this->getBaselineCost();\n\n    // Launch the norm exponential kernel\n    mppi::kernels::launchNormExpKernel(NUM_ROLLOUTS, this->getNormExpThreads(), this->trajectory_costs_d_,\n                                       1.0 / this->getLambda(), this->getBaselineCost(), this->stream_, false);\n    HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), this->trajectory_costs_d_,\n                                 
NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n    // Compute the normalizer\n    this->setNormalizer(mppi::kernels::computeNormalizer(this->trajectory_costs_.data(), NUM_ROLLOUTS));\n\n    mppi::kernels::computeFreeEnergy(this->free_energy_statistics_.real_sys.freeEnergyMean,\n                                     this->free_energy_statistics_.real_sys.freeEnergyVariance,\n                                     this->free_energy_statistics_.real_sys.freeEnergyModifiedVariance,\n                                     this->trajectory_costs_.data(), NUM_ROLLOUTS, this->getBaselineCost(),\n                                     this->getLambda());\n\n    this->sampler_->updateDistributionParamsFromDevice(this->trajectory_costs_d_, this->getNormalizerCost(), 0, false);\n\n    // Transfer the new control to the host\n    this->sampler_->setHostOptimalControlSequence(this->control_.data(), 0, true);\n  }\n\n  this->free_energy_statistics_.real_sys.normalizerPercent = this->getNormalizerCost() / NUM_ROLLOUTS;\n  this->free_energy_statistics_.real_sys.increase =\n      this->getBaselineCost() - this->free_energy_statistics_.real_sys.previousBaseline;\n  smoothControlTrajectory();\n  computeStateTrajectory(state);\n  state_array zero_state = this->model_->getZeroState();\n  for (int i = 0; i < this->getNumTimesteps(); i++)\n  {\n    this->model_->enforceConstraints(zero_state, this->control_.col(i));\n  }\n\n  // Copy back sampled trajectories\n  this->copySampledControlFromDevice(false);\n  if (this->getKernelChoiceAsEnum() == kernelType::USE_SINGLE_KERNEL)\n  {  // copy initial state to vis initial state for use with visualizeKernel\n    HANDLE_ERROR(cudaMemcpyAsync(this->vis_initial_state_d_, this->initial_state_d_, sizeof(float) * DYN_T::STATE_DIM,\n                                 cudaMemcpyDeviceToDevice, this->vis_stream_));\n  }\n  
this->copyTopControlFromDevice(true);\n}\n\nVANILLA_MPPI_TEMPLATE\nvoid VanillaMPPI::allocateCUDAMemory()\n{\n  PARENT_CLASS::allocateCUDAMemoryHelper();\n}\n\nVANILLA_MPPI_TEMPLATE\nvoid VanillaMPPI::computeStateTrajectory(const Eigen::Ref<const state_array>& x0)\n{\n  this->computeOutputTrajectoryHelper(this->output_, this->state_, x0, this->control_);\n}\n\nVANILLA_MPPI_TEMPLATE\nvoid VanillaMPPI::smoothControlTrajectory()\n{\n  this->smoothControlTrajectoryHelper(this->control_, this->control_history_);\n}\n\nVANILLA_MPPI_TEMPLATE\nvoid VanillaMPPI::calculateSampledStateTrajectories()\n{\n  int num_sampled_trajectories = this->getTotalSampledTrajectories();\n\n  // control already copied in compute control, so run kernel\n  if (this->getKernelChoiceAsEnum() == kernelType::USE_SPLIT_KERNELS)\n  {\n    mppi::kernels::launchVisualizeCostKernel<COST_T, SAMPLING_T>(\n        this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), num_sampled_trajectories,\n        this->getLambda(), this->getAlpha(), this->sampled_outputs_d_, this->sampled_crash_status_d_,\n        this->sampled_costs_d_, this->params_.cost_rollout_dim_, this->stream_, false);\n  }\n  else if (this->getKernelChoiceAsEnum() == kernelType::USE_SINGLE_KERNEL)\n  {\n    mppi::kernels::launchVisualizeKernel<DYN_T, COST_T, SAMPLING_T>(\n        this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), num_sampled_trajectories,\n        this->getLambda(), this->getAlpha(), this->vis_initial_state_d_, this->sampled_outputs_d_,\n        this->sampled_costs_d_, this->sampled_crash_status_d_, this->params_.visualize_dim_, this->stream_, false);\n  }\n\n  for (int i = 0; i < num_sampled_trajectories; i++)\n  {\n    // set initial state to the first location\n    // shifted by one since we do not save the initial state\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_trajectories_[i].data(),\n                                 this->sampled_outputs_d_ + i * 
this->getNumTimesteps() * DYN_T::OUTPUT_DIM,\n                                 (this->getNumTimesteps() - 1) * DYN_T::OUTPUT_DIM * sizeof(float),\n                                 cudaMemcpyDeviceToHost, this->vis_stream_));\n    HANDLE_ERROR(\n        cudaMemcpyAsync(this->sampled_costs_[i].data(), this->sampled_costs_d_ + (i * (this->getNumTimesteps() + 1)),\n                        (this->getNumTimesteps() + 1) * sizeof(float), cudaMemcpyDeviceToHost, this->vis_stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_crash_status_[i].data(),\n                                 this->sampled_crash_status_d_ + (i * this->getNumTimesteps()),\n                                 this->getNumTimesteps() * sizeof(int), cudaMemcpyDeviceToHost, this->vis_stream_));\n  }\n  HANDLE_ERROR(cudaStreamSynchronize(this->vis_stream_));\n}\n\n#undef VANILLA_MPPI_TEMPLATE\n#undef VanillaMPPI\n"
  },
  {
    "path": "include/mppi/controllers/MPPI/mppi_controller.cuh",
    "content": "/**\n * Created by jason on 10/30/19.\n * Creates the API for interfacing with an MPPI controller\n * should define a compute_control based on state as well\n * as return timing info\n **/\n\n#ifndef MPPIGENERIC_MPPI_CONTROLLER_CUH\n#define MPPIGENERIC_MPPI_CONTROLLER_CUH\n\n#include <mppi/controllers/controller.cuh>\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS,\n          class SAMPLING_T = ::mppi::sampling_distributions::GaussianDistribution<typename DYN_T::DYN_PARAMS_T>,\n          class PARAMS_T = ControllerParams<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM, MAX_TIMESTEPS>>\nclass VanillaMPPIController : public Controller<DYN_T, COST_T, FB_T, SAMPLING_T, MAX_TIMESTEPS, NUM_ROLLOUTS, PARAMS_T>\n{\npublic:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  // need control_array = ... so that we can initialize\n  // Eigen::Matrix with Eigen::Matrix::Zero();\n  typedef Controller<DYN_T, COST_T, FB_T, SAMPLING_T, MAX_TIMESTEPS, NUM_ROLLOUTS, PARAMS_T> PARENT_CLASS;\n  using control_array = typename PARENT_CLASS::control_array;\n  using control_trajectory = typename PARENT_CLASS::control_trajectory;\n  using state_trajectory = typename PARENT_CLASS::state_trajectory;\n  using state_array = typename PARENT_CLASS::state_array;\n  using sampled_cost_traj = typename PARENT_CLASS::sampled_cost_traj;\n  using FEEDBACK_GPU = typename PARENT_CLASS::TEMPLATED_FEEDBACK_GPU;\n\n  /**\n   *\n   * Public member functions\n   */\n  // Constructor\n  VanillaMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, float dt, int max_iter,\n                        float lambda, float alpha, int num_timesteps = MAX_TIMESTEPS,\n                        const Eigen::Ref<const control_trajectory>& init_control_traj = control_trajectory::Zero(),\n                        cudaStream_t stream = nullptr);\n  VanillaMPPIController(DYN_T* model, COST_T* cost, FB_T* 
fb_controller, SAMPLING_T* sampler, PARAMS_T& params,\n                        cudaStream_t stream = nullptr);\n\n  // Destructor\n  ~VanillaMPPIController();\n\n  std::string getControllerName() const override\n  {\n    return \"Vanilla MPPI\";\n  };\n\n  /**\n   * computes a new control sequence\n   * @param state starting position\n   */\n  void computeControl(const Eigen::Ref<const state_array>& state, int optimization_stride = 1) override;\n\n  void setPercentageSampledControlTrajectories(float new_perc)\n  {\n    this->setPercentageSampledControlTrajectoriesHelper(new_perc, 1);\n  }\n\n  void calculateSampledStateTrajectories() override;\n\n  void chooseAppropriateKernel() override;\n\nprotected:\n  void computeStateTrajectory(const Eigen::Ref<const state_array>& x0);\n\n  void smoothControlTrajectory();\n\nprivate:\n  // ======== MUST BE OVERWRITTEN =========\n  void allocateCUDAMemory();\n  // ======== END MUST BE OVERWRITTEN =====\n};\n\n#if __CUDACC__\n#include \"mppi_controller.cu\"\n#endif\n\n#endif  // MPPIGENERIC_MPPI_CONTROLLER_CUH\n"
  },
  {
    "path": "include/mppi/controllers/Primitives/primitives_controller.cu",
    "content": "#include <mppi/controllers/Primitives/primitives_controller.cuh>\n#include <mppi/core/mppi_common.cuh>\n#include <algorithm>\n#include <iostream>\n#include <mppi/sampling_distributions/piecewise_linear/piecewise_linear_noise.cuh>\n#include <mppi/sampling_distributions/colored_noise/colored_noise.cuh>\n\n#define Primitives                                                                                                     \\\n  PrimitivesController<DYN_T, COST_T, FB_T, MAX_TIMESTEPS, NUM_ROLLOUTS, BDIM_X, BDIM_Y, COST_B_X, COST_B_Y, PARAMS_T>\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X, int COST_B_Y, class PARAMS_T>\nPrimitives::PrimitivesController(DYN_T* model, COST_T* cost, FB_T* fb_controller, float dt, int max_iter, float lambda,\n                                 float alpha, const Eigen::Ref<const control_array>& control_std_dev, int num_timesteps,\n                                 const Eigen::Ref<const control_trajectory>& init_control_traj, cudaStream_t stream)\n  : PARENT_CLASS(model, cost, fb_controller, dt, max_iter, lambda, alpha, control_std_dev, num_timesteps,\n                 init_control_traj, stream)\n{\n  // Allocate CUDA memory for the controller\n  allocateCUDAMemory();\n  std::vector<float> tmp_vec(DYN_T::CONTROL_DIM, 0.0);\n  getColoredNoiseExponentsLValue() = std::move(tmp_vec);\n  getScalePiecewiseNoiseLValue() = std::move(tmp_vec);\n\n  // Copy the noise std_dev to the device\n  this->copyControlStdDevToDevice();\n\n  control_mppi_history_ = Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2>::Zero();\n}\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X, int COST_B_Y, class PARAMS_T>\nPrimitives::PrimitivesController(DYN_T* model, COST_T* cost, FB_T* fb_controller, PARAMS_T& params, cudaStream_t stream)\n  : PARENT_CLASS(model, cost, fb_controller, params, 
stream)\n{\n  // Allocate CUDA memory for the controller\n  allocateCUDAMemory();\n  if (this->getColoredNoiseExponentsLValue().size() == 0)\n  {\n    std::vector<float> tmp_vec(DYN_T::CONTROL_DIM, 0.0);\n    getColoredNoiseExponentsLValue() = std::move(tmp_vec);\n  }\n  if (this->getScalePiecewiseNoiseLValue().size() == 0)\n  {\n    std::vector<float> tmp_vec(DYN_T::CONTROL_DIM, 0.0);\n    getScalePiecewiseNoiseLValue() = std::move(tmp_vec);\n  }\n\n  // Copy the noise std_dev to the device\n  this->copyControlStdDevToDevice();\n}\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X, int COST_B_Y, class PARAMS_T>\nPrimitives::~PrimitivesController()\n{\n  // all implemented in standard controller\n}\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X, int COST_B_Y, class PARAMS_T>\nvoid Primitives::computeControl(const Eigen::Ref<const state_array>& state, int optimization_stride)\n{\n  // this->free_energy_statistics_.real_sys.previousBaseline = this->getBaselineCost();\n  state_array local_state = state;\n\n  if (getLeashActive())\n  {\n    this->model_->enforceLeash(state, this->state_.col(leash_jump_), this->params_.state_leash_dist_, local_state);\n  }\n\n  // Send the initial condition to the device\n  HANDLE_ERROR(cudaMemcpyAsync(this->initial_state_d_, local_state.data(), DYN_T::STATE_DIM * sizeof(float),\n                               cudaMemcpyHostToDevice, this->stream_));\n\n  /////////////////\n  // BEGIN INTERMEDIATE PLANNER\n  // Compute intermediate plan using piecewise linear noise, and choosing the best\n\n  int prev_controls_idx = 1;\n  float primitives_baseline = 0.0;\n  float baseline_prev = 0.0;\n  int best_idx = -1;\n\n  // Send the nominal control to the device\n  this->copyNominalControlToDevice(false);\n\n  for (int opt_iter = 0; opt_iter < getNumPrimitiveIterations(); 
opt_iter++)\n  {\n    powerlaw_psd_gaussian(getColoredNoiseExponentsLValue(), this->getNumTimesteps(), NUM_ROLLOUTS,\n                          this->control_noise_d_, 0, this->gen_, this->getOffsetDecayRate(), this->stream_);\n\n    // Generate piecewise linear noise data, update control_noise_d_\n    piecewise_linear_noise(this->getNumTimesteps(), NUM_ROLLOUTS, DYN_T::CONTROL_DIM, getPiecewiseSegments(),\n                           optimization_stride, getScalePiecewiseNoiseLValue(), getFracRandomNoiseTrajLValue(),\n                           getScaleAddNominalNoiseLValue(), this->control_d_, this->control_noise_d_,\n                           this->control_std_dev_d_, this->gen_, this->stream_);\n\n    // // Set nominal controls to zero because we want to use the noise directly\n    // this->control_ = control_trajectory::Zero();\n\n    // // Send the zero nominal control to the device\n    // this->copyNominalControlToDevice();\n\n    // Launch the rollout kernel\n    mppi_common::launchFastRolloutKernel<DYN_T, COST_T, NUM_ROLLOUTS, BDIM_X, BDIM_Y, 1, COST_B_X, COST_B_Y>(\n        this->model_->model_d_, this->cost_->cost_d_, this->getDt(), this->getNumTimesteps(), optimization_stride,\n        this->getLambda(), this->getAlpha(), this->initial_state_d_, this->output_d_, this->control_d_,\n        this->control_noise_d_, this->control_std_dev_d_, this->trajectory_costs_d_, this->stream_, false);\n\n    // Copy the costs back to the host\n    HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), this->trajectory_costs_d_,\n                                 NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n    primitives_baseline = mppi_common::computeBaselineCost(this->trajectory_costs_.data(), NUM_ROLLOUTS);\n\n    // get previous control cost (at index = 1, since index = 0 is zero control traj)\n    baseline_prev = this->trajectory_costs_.data()[prev_controls_idx];\n    if 
(this->debug_)\n    {\n      std::cerr << \"Previous Baseline: \" << baseline_prev << \"         Baseline: \" << this->getBaselineCost()\n                << std::endl;\n    }\n\n    // if baseline is too high and trajectory is unsafe, create and issue a stopping trajectory\n    // reminder:  baseline_ is the average cost along trajectory\n    if (getStoppingCostThreshold() > 0 && primitives_baseline > getStoppingCostThreshold())\n    {\n      std::cerr << \"Baseline is too high, issuing stopping trajectory!\" << std::endl;\n      computeStoppingTrajectory(local_state);\n      primitives_baseline = std::numeric_limits<float>::min();\n    }\n    // else if (primitives_baseline > baseline_prev - getHysteresisCostThreshold())\n    // {\n    //   // baseline is not decreasing enough, use controls from the previous iteration\n    //   if (this->debug_)\n    //   {\n    //     std::cout << \"Not enough improvement, use prev controls.\" << std::endl;\n    //   }\n    //   HANDLE_ERROR(cudaMemcpyAsync(\n    //       this->control_.data(),\n    //       this->control_noise_d_ + prev_controls_idx * this->getNumTimesteps() * DYN_T::CONTROL_DIM,\n    //       sizeof(float) * this->getNumTimesteps() * DYN_T::CONTROL_DIM, cudaMemcpyDeviceToHost, this->stream_));\n\n    //   primitives_baseline = baseline_prev;\n    // }\n    else\n    {  // otherwise, update the nominal control\n      // Copy best control from device to the host\n      best_idx = mppi_common::computeBestIndex(this->trajectory_costs_.data(), NUM_ROLLOUTS);\n      HANDLE_ERROR(cudaMemcpyAsync(\n          this->control_.data(), this->control_noise_d_ + best_idx * this->getNumTimesteps() * DYN_T::CONTROL_DIM,\n          sizeof(float) * this->getNumTimesteps() * DYN_T::CONTROL_DIM, cudaMemcpyDeviceToHost, this->stream_));\n    }\n\n    this->copyNominalControlToDevice(false);\n\n    cudaStreamSynchronize(this->stream_);\n  }\n\n  // Copy back sampled trajectories for visualization\n  if (getVisualizePrimitives())\n  
{\n    this->copySampledControlFromDevice(false);\n    this->copyTopControlFromDevice(true);\n  }\n\n  //  END INTERMEDIATE PLANNER\n  ////////////////\n\n  ////////////////\n  // BEGIN MPPI\n  for (int opt_iter = 0; opt_iter < this->getNumIters(); opt_iter++)\n  {\n    // Send the nominal control to the device\n    copyMPPIControlToDevice(false);\n\n    // Generate noise data\n    // const int colored_num_timesteps = (this->getNumTimesteps() > optimization_stride) ?\n    //                                       this->getNumTimesteps() - optimization_stride :\n    //                                       this->getNumTimesteps();\n    // const int colored_stride = (this->getNumTimesteps() > optimization_stride) ? optimization_stride : 0;\n    // if (colored_stride == 0)\n    // {\n    //   std::cout << \"We tripped the fail-safe\" << std::endl;\n    // }\n    powerlaw_psd_gaussian(getColoredNoiseExponentsLValue(), this->getNumTimesteps(), NUM_ROLLOUTS,\n                          this->control_noise_d_, 0, this->gen_, this->getOffsetDecayRate(), this->stream_);\n    // curandGenerateNormal(this->gen_, this->control_noise_d_, NUM_ROLLOUTS * this->getNumTimesteps() *\n    // DYN_T::CONTROL_DIM,\n    //                      0.0, 1.0);\n    /*\n    std::vector<float> noise = this->getSampledNoise();\n    float mean = 0;\n    for(int k = 0; k < noise.size(); k++) {\n      mean += (noise[k]/noise.size());\n    }\n\n    float std_dev = 0;\n    for(int k = 0; k < noise.size(); k++) {\n      std_dev += powf(noise[k] - mean, 2);\n    }\n    std_dev = sqrt(std_dev/noise.size());\n    printf(\"CPU 1 side N(%f, %f)\\n\", mean, std_dev);\n     */\n\n    // Launch the rollout kernel\n    mppi_common::launchFastRolloutKernel<DYN_T, COST_T, NUM_ROLLOUTS, BDIM_X, BDIM_Y, 1, COST_B_X, COST_B_Y>(\n        this->model_->model_d_, this->cost_->cost_d_, this->getDt(), this->getNumTimesteps(), optimization_stride,\n        this->getLambda(), this->getAlpha(), this->initial_state_d_, 
this->output_d_, control_mppi_d_,\n        this->control_noise_d_, this->control_std_dev_d_, this->trajectory_costs_d_, this->stream_, false);\n    /*\n    noise = this->getSampledNoise();\n    mean = 0;\n    for(int k = 0; k < noise.size(); k++) {\n      mean += (noise[k]/noise.size());\n    }\n\n    std_dev = 0;\n    for(int k = 0; k < noise.size(); k++) {\n      std_dev += powf(noise[k] - mean, 2);\n    }\n    std_dev = sqrt(std_dev/noise.size());\n    printf(\"CPU 2 side N(%f, %f)\\n\", mean, std_dev);\n     */\n\n    // Copy the costs back to the host\n    HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), this->trajectory_costs_d_,\n                                 NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n    this->setBaseline(mppi_common::computeBaselineCost(this->trajectory_costs_.data(), NUM_ROLLOUTS));\n\n    // if (this->getBaselineCost() > baseline_prev + 1)\n    // {\n    //   // TODO handle printing\n    //   if (this->debug_)\n    //   {\n    //     std::cout << \"Previous Baseline: \" << baseline_prev << std::endl;\n    //     std::cout << \"         Baseline: \" << this->getBaselineCost() << std::endl;\n    //   }\n    // }\n\n    // baseline_prev = this->getBaselineCost();\n\n    // Launch the norm exponential kernel\n    if (getGamma() == 0 || getRExp() == 0)\n    {\n      mppi_common::launchNormExpKernel(NUM_ROLLOUTS, BDIM_X, this->trajectory_costs_d_, 1.0 / this->getLambda(),\n                                       this->getBaselineCost(), this->stream_, false);\n    }\n    else\n    {\n      mppi_common::launchTsallisKernel(NUM_ROLLOUTS, BDIM_X, this->trajectory_costs_d_, getGamma(), getRExp(),\n                                       this->getBaselineCost(), this->stream_, false);\n    }\n    HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), this->trajectory_costs_d_,\n                                 NUM_ROLLOUTS * sizeof(float), 
cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n    // Compute the normalizer\n    this->setNormalizer(mppi_common::computeNormalizer(this->trajectory_costs_.data(), NUM_ROLLOUTS));\n\n    mppi_common::computeFreeEnergy(this->free_energy_statistics_.real_sys.freeEnergyMean,\n                                   this->free_energy_statistics_.real_sys.freeEnergyVariance,\n                                   this->free_energy_statistics_.real_sys.freeEnergyModifiedVariance,\n                                   this->trajectory_costs_.data(), NUM_ROLLOUTS, this->getBaselineCost(),\n                                   this->getLambda());\n\n    // Compute the cost weighted average //TODO SUM_STRIDE is BDIM_X, but should it be its own parameter?\n    mppi_common::launchWeightedReductionKernel<DYN_T, NUM_ROLLOUTS, BDIM_X>(\n        this->trajectory_costs_d_, this->control_noise_d_, control_mppi_d_, this->getNormalizerCost(),\n        this->getNumTimesteps(), this->stream_, false);\n\n    /*\n    noise = this->getSampledNoise();\n    mean = 0;\n    for(int k = 0; k < noise.size(); k++) {\n      mean += (noise[k]/noise.size());\n    }\n\n    std_dev = 0;\n    for(int k = 0; k < noise.size(); k++) {\n      std_dev += powf(noise[k] - mean, 2);\n    }\n    std_dev = sqrt(std_dev/noise.size());\n    printf(\"CPU 3 side N(%f, %f)\\n\", mean, std_dev);\n     */\n\n    // Transfer the new control to the host\n    HANDLE_ERROR(cudaMemcpyAsync(control_mppi_.data(), control_mppi_d_,\n                                 sizeof(float) * this->getNumTimesteps() * DYN_T::CONTROL_DIM, cudaMemcpyDeviceToHost,\n                                 this->stream_));\n    cudaStreamSynchronize(this->stream_);\n  }\n\n  this->free_energy_statistics_.real_sys.normalizerPercent = this->getNormalizerCost() / NUM_ROLLOUTS;\n  this->free_energy_statistics_.real_sys.increase =\n      this->getBaselineCost() - 
this->free_energy_statistics_.real_sys.previousBaseline;\n\n  // END MPPI\n  ////////////////////////\n\n  // decide between using the MPPI control or the primitives control\n  if (this->debug_)\n  {\n    std::cerr << \"mppi baseline: \" << this->getBaselineCost() << \", primitives baseline: \" << primitives_baseline\n              << \", prev baseline: \" << baseline_prev << std::endl;\n  }\n  if ((getNumPrimitiveIterations() == 0 && this->getNumIters() > 0) ||\n      ((getNumPrimitiveIterations() > 0 && this->getNumIters() > 0) &&\n       (this->getBaselineCost() < primitives_baseline + getHysteresisCostThreshold())))\n  {\n    this->control_ = control_mppi_;\n    this->copyNominalControlToDevice();\n    if (this->debug_)\n    {\n      std::cout << \"Using MPPI control\" << std::endl;\n    }\n    this->free_energy_statistics_.nominal_state_used = 0;\n  }\n  else\n  {\n    // control_mppi_ = this->control_; // don't do this, we want to save the MPPI control\n    if (this->debug_)\n    {\n      std::cout << \"Using primitives control, \";\n    }\n    if (best_idx > 0 && best_idx <= int((getFracRandomNoiseTrajLValue())[0] * NUM_ROLLOUTS))\n    {\n      if (this->debug_)\n      {\n        std::cout << \"colored noise added to nominal.\" << std::endl;\n      }\n      this->free_energy_statistics_.nominal_state_used = 1;\n    }\n    else if (best_idx <= int((getFracRandomNoiseTrajLValue()[0] + getFracRandomNoiseTrajLValue()[1]) * NUM_ROLLOUTS))\n    {\n      if (this->debug_)\n      {\n        std::cout << \"piecewise noise added to nominal.\" << std::endl;\n      }\n      this->free_energy_statistics_.nominal_state_used = 2;\n    }\n    else\n    {\n      if (this->debug_)\n      {\n        std::cout << \"new piecewise trajectory.\" << std::endl;\n      }\n      this->free_energy_statistics_.nominal_state_used = 3;\n    }\n  }\n\n  smoothControlTrajectory();\n  computeStateTrajectory(local_state);\n  state_array zero_state = state_array::Zero();\n  for (int i = 0; i < 
this->getNumTimesteps(); i++)\n  {\n    this->model_->enforceConstraints(zero_state, this->control_.col(i));\n    this->model_->enforceConstraints(zero_state, control_mppi_.col(i));\n    // this->control_.col(i)[1] =\n    //     fminf(fmaxf(this->control_.col(i)[1], this->model_->control_rngs_[1].x), this->model_->control_rngs_[1].y);\n  }\n\n  // Copy back sampled trajectories for visualization\n  if (!getVisualizePrimitives())\n  {\n    this->copySampledControlFromDevice(false);\n    this->copyTopControlFromDevice(true);\n  }\n}\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X, int COST_B_Y, class PARAMS_T>\nvoid Primitives::allocateCUDAMemory()\n{\n  PARENT_CLASS::allocateCUDAMemoryHelper();\n  HANDLE_ERROR(cudaMalloc((void**)&control_mppi_d_, sizeof(float) * DYN_T::CONTROL_DIM * MAX_TIMESTEPS));\n}\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X, int COST_B_Y, class PARAMS_T>\nvoid Primitives::copyMPPIControlToDevice(bool synchronize)\n{\n  HANDLE_ERROR(cudaMemcpyAsync(control_mppi_d_, control_mppi_.data(), sizeof(float) * control_mppi_.size(),\n                               cudaMemcpyHostToDevice, this->stream_));\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n  }\n}\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X, int COST_B_Y, class PARAMS_T>\nvoid Primitives::computeStateTrajectory(const Eigen::Ref<const state_array>& x0)\n{\n  this->computeStateTrajectoryHelper(this->state_, x0, this->control_);\n}\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X, int COST_B_Y, class PARAMS_T>\nvoid Primitives::computeStoppingTrajectory(const Eigen::Ref<const state_array>& x0)\n{\n  
state_array xdot;\n  state_array state = x0;\n  state_array xnext;\n  output_array output;\n  control_array u_i = control_array::Zero();\n  this->model_->initializeDynamics(state, u_i, output, 0, this->getDt());\n  for (int i = 0; i < this->getNumTimesteps() - 1; ++i)\n  {\n    this->model_->getStoppingControl(state, u_i);\n    this->model_->enforceConstraints(state, u_i);\n    this->control_.col(i) = u_i;\n    this->model_->step(state, xnext, xdot, u_i, output, i, this->getDt());\n    state = xnext;\n  }\n}\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X, int COST_B_Y, class PARAMS_T>\nvoid Primitives::slideControlSequence(int steps)\n{\n  // TODO does the logic of handling control history reasonable?\n  leash_jump_ = steps;\n  // Save the control history\n  this->saveControlHistoryHelper(steps, this->control_, this->control_history_);\n  this->saveControlHistoryHelper(steps, control_mppi_, control_mppi_history_);\n\n  this->slideControlSequenceHelper(steps, this->control_);\n  this->slideControlSequenceHelper(steps, control_mppi_);\n}\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X, int COST_B_Y, class PARAMS_T>\nvoid Primitives::smoothControlTrajectory()\n{\n  this->smoothControlTrajectoryHelper(this->control_, this->control_history_);\n  this->smoothControlTrajectoryHelper(control_mppi_, control_mppi_history_);\n}\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X, int COST_B_Y, class PARAMS_T>\nvoid Primitives::calculateSampledStateTrajectories()\n{\n  int num_sampled_trajectories = this->getTotalSampledTrajectories();\n  // controls already copied in compute control\n\n  mppi_common::launchStateAndCostTrajectoryKernel<DYN_T, COST_T, FEEDBACK_GPU, BDIM_X, BDIM_Y>(\n      this->model_->model_d_, 
this->cost_->cost_d_, this->fb_controller_->getDevicePointer(), this->sampled_noise_d_,\n      this->initial_state_d_, this->sampled_outputs_d_, this->sampled_costs_d_, this->sampled_crash_status_d_,\n      num_sampled_trajectories, this->getNumTimesteps(), this->getDt(), this->vis_stream_);\n\n  for (int i = 0; i < num_sampled_trajectories; i++)\n  {\n    // set initial state to the first location\n    // shifted by one since we do not save the initial state\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_trajectories_[i].data(),\n                                 this->sampled_outputs_d_ + i * this->getNumTimesteps() * DYN_T::OUTPUT_DIM,\n                                 (this->getNumTimesteps() - 1) * DYN_T::OUTPUT_DIM * sizeof(float),\n                                 cudaMemcpyDeviceToHost, this->vis_stream_));\n    HANDLE_ERROR(\n        cudaMemcpyAsync(this->sampled_costs_[i].data(), this->sampled_costs_d_ + (i * (this->getNumTimesteps() + 1)),\n                        (this->getNumTimesteps() + 1) * sizeof(float), cudaMemcpyDeviceToHost, this->vis_stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_crash_status_[i].data(),\n                                 this->sampled_crash_status_d_ + (i * this->getNumTimesteps()),\n                                 this->getNumTimesteps() * sizeof(float), cudaMemcpyDeviceToHost, this->vis_stream_));\n  }\n  HANDLE_ERROR(cudaStreamSynchronize(this->vis_stream_));\n}\n\n#undef Primitives\n"
  },
  {
    "path": "include/mppi/controllers/Primitives/primitives_controller.cuh",
    "content": "/**\n * Created by david fan on 04/11/22.\n * Creates the API for interfacing with an MPPI controller\n * should define a compute_control based on state as well\n * as return timing info\n **/\n\n#ifndef MPPIGENERIC_PRIMITIVES_CONTROLLER_CUH\n#define MPPIGENERIC_PRIMITIVES_CONTROLLER_CUH\n\n#include <mppi/controllers/controller.cuh>\n\n// this is needed to extend the coloredMPPI params and ensure that\n// dynamic reconfigure in mppi_generic_racer_plant.cpp works properly\n#include <mppi/controllers/ColoredMPPI/colored_mppi_controller.cuh>\n\n#include <vector>\n\ntemplate <int S_DIM, int C_DIM, int MAX_TIMESTEPS>\nstruct PrimitivesParams : public ColoredMPPIParams<S_DIM, C_DIM, MAX_TIMESTEPS>\n{\n  int num_primitive_iters_;\n  int num_piecewise_segments_ = 5;\n  std::vector<float> scale_piecewise_noise_;\n  std::vector<float> frac_add_nominal_traj_;\n  std::vector<float> scale_add_nominal_noise_;\n  float stopping_cost_threshold_ = 1.0e8;\n  float hysteresis_cost_threshold_ = 0.0;\n  bool visualize_primitives_ = false;\n\n  PrimitivesParams<S_DIM, C_DIM, MAX_TIMESTEPS>() = default;\n  PrimitivesParams<S_DIM, C_DIM, MAX_TIMESTEPS>(const PrimitivesParams<S_DIM, C_DIM, MAX_TIMESTEPS>& other)\n  {\n    typedef ColoredMPPIParams<S_DIM, C_DIM, MAX_TIMESTEPS> BASE;\n    const BASE& other_item_ref = other;\n    *(static_cast<BASE*>(this)) = other_item_ref;\n    this->scale_piecewise_noise_ = other.scale_piecewise_noise_;\n    this->frac_add_nominal_traj_ = other.frac_add_nominal_traj_;\n    this->scale_add_nominal_noise_ = other.scale_add_nominal_noise_;\n  }\n\n  PrimitivesParams<S_DIM, C_DIM, MAX_TIMESTEPS>(PrimitivesParams<S_DIM, C_DIM, MAX_TIMESTEPS>& other)\n  {\n    typedef ColoredMPPIParams<S_DIM, C_DIM, MAX_TIMESTEPS> BASE;\n    BASE& other_item_ref = other;\n    *(static_cast<BASE*>(this)) = other_item_ref;\n    this->scale_piecewise_noise_ = other.scale_piecewise_noise_;\n    this->frac_add_nominal_traj_ = other.frac_add_nominal_traj_;\n    
this->scale_add_nominal_noise_ = other.scale_add_nominal_noise_;\n  }\n};\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, int BDIM_X, int BDIM_Y,\n          int COST_B_X = 64, int COST_B_Y = 2,\n          class PARAMS_T = PrimitivesParams<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM, MAX_TIMESTEPS>>\nclass PrimitivesController\n  : public Controller<DYN_T, COST_T, FB_T, MAX_TIMESTEPS, NUM_ROLLOUTS, BDIM_X, BDIM_Y, PARAMS_T>\n{\npublic:\n  // EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  // need control_array = ... so that we can initialize\n  // Eigen::Matrix with Eigen::Matrix::Zero();\n  typedef Controller<DYN_T, COST_T, FB_T, MAX_TIMESTEPS, NUM_ROLLOUTS, BDIM_X, BDIM_Y, PARAMS_T> PARENT_CLASS;\n  using control_array = typename PARENT_CLASS::control_array;\n  using control_trajectory = typename PARENT_CLASS::control_trajectory;\n  using state_trajectory = typename PARENT_CLASS::state_trajectory;\n  using state_array = typename PARENT_CLASS::state_array;\n  using output_array = typename PARENT_CLASS::output_array;\n  using sampled_cost_traj = typename PARENT_CLASS::sampled_cost_traj;\n  using FEEDBACK_GPU = typename PARENT_CLASS::TEMPLATED_FEEDBACK_GPU;\n\n  /**\n   *\n   * Public member functions\n   */\n  // Constructor\n  PrimitivesController(DYN_T* model, COST_T* cost, FB_T* fb_controller, float dt, int max_iter, float lambda,\n                       float alpha, const Eigen::Ref<const control_array>& control_std_dev,\n                       int num_timesteps = MAX_TIMESTEPS,\n                       const Eigen::Ref<const control_trajectory>& init_control_traj = control_trajectory::Zero(),\n                       cudaStream_t stream = nullptr);\n\n  PrimitivesController(DYN_T* model, COST_T* cost, FB_T* fb_controller, PARAMS_T& params,\n                       cudaStream_t stream = nullptr);\n\n  // Destructor\n  ~PrimitivesController();\n\n  std::string getControllerName()\n  {\n    return \"Primitives\";\n  };\n\n  /**\n   * computes a 
new control sequence\n   * @param state starting position\n   */\n  void computeControl(const Eigen::Ref<const state_array>& state, int optimization_stride = 1) override;\n\n  /**\n   * Slide the control sequence back n steps\n   */\n  void slideControlSequence(int optimization_stride) override;\n\n  void setPercentageSampledControlTrajectories(float new_perc)\n  {\n    this->setPercentageSampledControlTrajectoriesHelper(new_perc, 1);\n  }\n\n  void setNumPrimitiveIterations(int new_num_iter)\n  {\n    this->params_.num_primitive_iters_ = new_num_iter;\n  }\n\n  int getNumPrimitiveIterations()\n  {\n    return this->params_.num_primitive_iters_;\n  }\n\n  void setGamma(float gamma)\n  {\n    this->params_.gamma = gamma;\n  }\n\n  float getGamma()\n  {\n    return this->params_.gamma;\n  }\n\n  void setRExp(float r)\n  {\n    this->params_.r = r;\n  }\n\n  float getRExp()\n  {\n    return this->params_.r;\n  }\n\n  void setOffsetDecayRate(float offset_decay_rate)\n  {\n    this->params_.offset_decay_rate = offset_decay_rate;\n  }\n\n  float getOffsetDecayRate()\n  {\n    return this->params_.offset_decay_rate;\n  }\n\n  void setColoredNoiseExponents(std::vector<float>& new_exponents)\n  {\n    this->params_.colored_noise_exponents_ = new_exponents;\n  }\n\n  float getColoredNoiseExponent(int index) const\n  {\n    return this->params_.colored_noise_exponents_[index];\n  }\n\n  void setPiecewiseSegments(int segments)\n  {\n    this->params_.num_piecewise_segments_ = segments;\n  }\n\n  int getPiecewiseSegments()\n  {\n    return this->params_.num_piecewise_segments_;\n  }\n\n  void setScalePiecewiseNoise(std::vector<float>& new_scale)\n  {\n    this->params_.scale_piecewise_noise_ = new_scale;\n  }\n\n  std::vector<float> getScalePiecewiseNoise()\n  {\n    return this->params_.scale_piecewise_noise_;\n  }\n\n  void setFracRandomNoiseTraj(std::vector<float> frac_add_nominal_traj)\n  {\n    this->params_.frac_add_nominal_traj_ = frac_add_nominal_traj;\n  }\n  
std::vector<float> getFracRandomNoiseTraj()\n  {\n    return this->params_.frac_add_nominal_traj_;\n  }\n\n  void setScaleAddNominalNoise(std::vector<float> scale_add_nominal_noise)\n  {\n    this->params_.scale_add_nominal_noise_ = scale_add_nominal_noise;\n  }\n\n  std::vector<float> getScaleAddNominalNoise()\n  {\n    return this->params_.scale_add_nominal_noise_;\n  }\n\n  void setStateLeashLength(float new_state_leash, int index = 0)\n  {\n    this->params_.state_leash_dist_[index] = new_state_leash;\n  }\n\n  float getStateLeashLength(int index)\n  {\n    return this->params_.state_leash_dist_[index];\n  }\n\n  bool getLeashActive()\n  {\n    return leash_active_;\n  }\n\n  void setLeashActive(bool new_leash_active)\n  {\n    leash_active_ = new_leash_active;\n  }\n\n  void setStoppingCostThreshold(float new_stopping_cost_threshold)\n  {\n    this->params_.stopping_cost_threshold_ = new_stopping_cost_threshold;\n  }\n\n  float getStoppingCostThreshold()\n  {\n    return this->params_.stopping_cost_threshold_;\n  }\n\n  void setHysteresisCostThreshold(float new_hysteresis_cost_threshold)\n  {\n    this->params_.hysteresis_cost_threshold_ = new_hysteresis_cost_threshold;\n  }\n\n  float getHysteresisCostThreshold()\n  {\n    return this->params_.hysteresis_cost_threshold_;\n  }\n\n  void setVisualizePrimitives(bool visualize_primitives)\n  {\n    this->params_.visualize_primitives_ = visualize_primitives;\n  }\n\n  bool getVisualizePrimitives()\n  {\n    return this->params_.visualize_primitives_;\n  }\n\n  void calculateSampledStateTrajectories() override;\n\nprotected:\n  std::vector<float>& getScaleAddNominalNoiseLValue()\n  {\n    return this->params_.scale_add_nominal_noise_;\n  }\n\n  std::vector<float>& getScalePiecewiseNoiseLValue()\n  {\n    return this->params_.scale_piecewise_noise_;\n  }\n\n  std::vector<float>& getFracRandomNoiseTrajLValue()\n  {\n    return this->params_.frac_add_nominal_traj_;\n  }\n\n  std::vector<float>& 
getColoredNoiseExponentsLValue()\n  {\n    return this->params_.colored_noise_exponents_;\n  }\n\n  void computeStateTrajectory(const Eigen::Ref<const state_array>& x0);\n  void copyMPPIControlToDevice(bool synchronize = true);\n\n  void computeStoppingTrajectory(const Eigen::Ref<const state_array>& x0);\n  void smoothControlTrajectory();\n\n  int leash_jump_ = 1;\n  bool leash_active_ = false;\n\n  float* control_mppi_d_;                                         // Array of size DYN_T::CONTROL_DIM*NUM_TIMESTEPS\n  control_trajectory control_mppi_ = control_trajectory::Zero();  // host side mppi control trajectory\n  Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2> control_mppi_history_;\n\nprivate:\n  // ======== MUST BE OVERWRITTEN =========\n  void allocateCUDAMemory();\n  // ======== END MUST BE OVERWRITTEN =====\n};\n\n#if __CUDACC__\n#include \"primitives_controller.cu\"\n#endif\n\n#endif  // MPPIGENERIC_PRIMITIVES_CONTROLLER_CUH\n"
  },
  {
    "path": "include/mppi/controllers/R-MPPI/robust_mppi_controller.cu",
    "content": "#include \"robust_mppi_controller.cuh\"\n#include <Eigen/Eigenvalues>\n#include <exception>\n\n#define ROBUST_MPPI_TEMPLATE                                                                                           \\\n  template <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, class SAMPLING_T,              \\\n            class PARAMS_T>\n\n#define RobustMPPI RobustMPPIController<DYN_T, COST_T, FB_T, MAX_TIMESTEPS, NUM_ROLLOUTS, SAMPLING_T, PARAMS_T>\n\nROBUST_MPPI_TEMPLATE\nRobustMPPI::RobustMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, float dt,\n                                 int max_iter, float lambda, float alpha, float value_function_threshold,\n                                 int num_timesteps, const Eigen::Ref<const control_trajectory>& init_control_traj,\n                                 int num_candidate_nominal_states, int optimization_stride, cudaStream_t stream)\n  : PARENT_CLASS(model, cost, fb_controller, sampler, dt, max_iter, lambda, alpha, num_timesteps, init_control_traj,\n                 stream)\n{\n  setValueFunctionThreshold(value_function_threshold);\n  setOptimizationStride(optimization_stride);\n  setNumCandidates(num_candidate_nominal_states);\n  updateNumCandidates(getNumCandidates());\n  setParams(this->params_);\n  this->sampler_->setNumDistributions(2);\n\n  // Zero the control history\n  this->control_history_ = Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2>::Zero();\n  nominal_control_history_ = Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2>::Zero();\n\n  // Allocate CUDA memory for the controller\n  allocateCUDAMemory();\n\n  // Copy the noise std_dev to the device\n  // this->copyControlStdDevToDevice();\n\n  // Initialize Feedback\n  this->fb_controller_->initTrackingController();\n\n  // Initialize the nominal control trajectory\n  nominal_control_trajectory_ = init_control_traj;\n\n  this->enable_feedback_ = true;\n  
chooseAppropriateKernel();\n}\n\nROBUST_MPPI_TEMPLATE\nRobustMPPI::RobustMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, PARAMS_T& params,\n                                 cudaStream_t stream)\n  : PARENT_CLASS(model, cost, fb_controller, sampler, params, stream)\n{\n  updateNumCandidates(getNumCandidates());\n  setParams(params);\n  this->sampler_->setNumDistributions(2);\n\n  // Zero the control history\n  this->control_history_ = Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2>::Zero();\n  nominal_control_history_ = Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2>::Zero();\n\n  // Allocate CUDA memory for the controller\n  allocateCUDAMemory();\n\n  // Copy the noise std_dev to the device\n  // this->copyControlStdDevToDevice();\n\n  // Initialize Feedback\n  this->fb_controller_->initTrackingController();\n\n  // Initialize the nominal control trajectory\n  nominal_control_trajectory_ = this->params_.init_control_traj_;\n\n  this->enable_feedback_ = true;\n  chooseAppropriateKernel();\n}\n\nROBUST_MPPI_TEMPLATE\nRobustMPPI::~RobustMPPIController()\n{\n  deallocateNominalStateCandidateMemory();\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::allocateCUDAMemory()\n{\n  PARENT_CLASS::allocateCUDAMemoryHelper(1);\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::chooseAppropriateKernel()\n{\n  // Get properties of current GPU\n  cudaDeviceProp deviceProp;\n  HANDLE_ERROR(cudaGetDeviceProperties(&deviceProp, 0));\n\n  unsigned single_rollout_kernel_byte_size = mppi::kernels::rmppi::calcRMPPICombinedKernelSharedMemSize(\n      this->model_, this->cost_, this->sampler_, this->fb_controller_->getHostPointer().get(),\n      this->params_.dynamics_rollout_dim_);\n  unsigned rollout_dyn_kernel_byte_size = mppi::kernels::rmppi::calcRMPPIDynKernelSharedMemSize(\n      this->model_, this->sampler_, this->fb_controller_->getHostPointer().get(), this->params_.dynamics_rollout_dim_);\n  unsigned rollout_cost_kernel_byte_size = 
mppi::kernels::rmppi::calcRMPPICostKernelSharedMemSize(\n      this->cost_, this->sampler_, this->fb_controller_->getHostPointer().get(), this->params_.cost_rollout_dim_);\n\n  // Limit kernel choice to those that fit within shared memory constraints\n  bool rollout_dyn_too_large = rollout_dyn_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool rollout_cost_too_large = rollout_cost_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool rollout_combined_too_large = single_rollout_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool rollout_set = false;\n\n  if (rollout_combined_too_large && (rollout_dyn_too_large || rollout_cost_too_large))\n  {\n    std::string error_msg =\n        \"There is not enough shared memory on the GPU for either rollout kernel option. The combined rollout kernel \"\n        \"takes \" +\n        std::to_string(single_rollout_kernel_byte_size) + \" bytes, the cost rollout kernel takes \" +\n        std::to_string(rollout_cost_kernel_byte_size) + \" bytes, the dynamics rollout kernel takes \" +\n        std::to_string(rollout_dyn_kernel_byte_size) + \" bytes, and the max is \" +\n        std::to_string(deviceProp.sharedMemPerBlock) +\n        \" bytes. 
Considering lowering the corresponding thread block sizes.\";\n    throw std::runtime_error(error_msg);\n  }\n  else if (rollout_dyn_too_large || rollout_cost_too_large)\n  {\n    this->setKernelChoice(kernelType::USE_SINGLE_KERNEL);\n    rollout_set = true;\n  }\n  else if (rollout_combined_too_large)\n  {\n    this->setKernelChoice(kernelType::USE_SPLIT_KERNELS);\n    rollout_set = true;\n  }\n\n  /**\n   * Set up for kernel comparison\n   */\n  state_array zero_state = this->model_->getZeroState();\n  float* initial_state_nominal_d = this->initial_state_d_;\n  float* initial_state_real_d = this->initial_state_d_ + DYN_T::STATE_DIM;\n\n  // Transfer the initial state to the GPU\n  HANDLE_ERROR(cudaMemcpyAsync(initial_state_real_d, zero_state.data(), sizeof(float) * DYN_T::STATE_DIM,\n                               cudaMemcpyHostToDevice, this->stream_));\n  HANDLE_ERROR(cudaMemcpyAsync(initial_state_nominal_d, zero_state.data(), sizeof(float) * DYN_T::STATE_DIM,\n                               cudaMemcpyHostToDevice, this->stream_));\n  // Copy the importance sampling control to the system\n  this->sampler_->copyImportanceSamplerToDevice(nominal_control_trajectory_.data(), 0, false);\n  this->sampler_->copyImportanceSamplerToDevice(nominal_control_trajectory_.data(), 1, false);\n  // Send feedback gains to the GPU\n  this->fb_controller_->copyToDevice(false);\n\n  // Generate a the control perturbations for exploration\n  this->sampler_->generateSamples(1, 0, this->gen_, true);\n\n  float single_rollout_kernel_time_ms = std::numeric_limits<float>::infinity();\n  float split_rollout_kernel_time_ms = std::numeric_limits<float>::infinity();\n\n  auto start_rollout_single_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < this->getNumKernelEvaluations() && !rollout_set; i++)\n  {\n    mppi::kernels::rmppi::launchRMPPIRolloutKernel<DYN_T, COST_T, SAMPLING_T, FEEDBACK_GPU>(\n        this->model_, this->cost_, this->sampler_, 
this->fb_controller_->getHostPointer().get(), this->getDt(),\n        this->getNumTimesteps(), NUM_ROLLOUTS, this->getLambda(), this->getAlpha(), getValueFunctionThreshold(),\n        this->initial_state_d_, this->trajectory_costs_d_, this->params_.dynamics_rollout_dim_, this->stream_, true);\n  }\n  auto end_rollout_single_kernel_time = std::chrono::steady_clock::now();\n\n  auto start_rollout_split_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < this->getNumKernelEvaluations() && !rollout_set; i++)\n  {\n    mppi::kernels::rmppi::launchSplitRMPPIRolloutKernel<DYN_T, COST_T, SAMPLING_T, FEEDBACK_GPU>(\n        this->model_, this->cost_, this->sampler_, this->fb_controller_->getHostPointer().get(), this->getDt(),\n        this->getNumTimesteps(), NUM_ROLLOUTS, this->getLambda(), this->getAlpha(), getValueFunctionThreshold(),\n        this->initial_state_d_, this->output_d_, this->trajectory_costs_d_, this->params_.dynamics_rollout_dim_,\n        this->params_.cost_rollout_dim_, this->stream_, true);\n  }\n  auto end_rollout_split_kernel_time = std::chrono::steady_clock::now();\n\n  if (!rollout_set)\n  {\n    single_rollout_kernel_time_ms =\n        mppi::math::timeDiffms(end_rollout_single_kernel_time, start_rollout_single_kernel_time);\n    split_rollout_kernel_time_ms =\n        mppi::math::timeDiffms(end_rollout_split_kernel_time, start_rollout_split_kernel_time);\n  }\n\n  std::string kernel_choice = \"\";\n  if (split_rollout_kernel_time_ms < single_rollout_kernel_time_ms)\n  {\n    this->setKernelChoice(kernelType::USE_SPLIT_KERNELS);\n    kernel_choice = \"split \";\n  }\n  else\n  {\n    this->setKernelChoice(kernelType::USE_SINGLE_KERNEL);\n    kernel_choice = \"single\";\n  }\n\n  this->logger_->info(\n      \"Choosing %s rollout kernel based on split taking %f ms and single taking %f ms after %d iterations\\n\",\n      kernel_choice.c_str(), split_rollout_kernel_time_ms, single_rollout_kernel_time_ms,\n      
this->getNumKernelEvaluations());\n\n  // Do the same for the eval kernel\n  chooseAppropriateEvalKernel();\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::chooseAppropriateEvalKernel()\n{\n  // Get properties of current GPU\n  cudaDeviceProp deviceProp;\n  HANDLE_ERROR(cudaGetDeviceProperties(&deviceProp, 0));\n  // Get shared mem sizes for various kernels\n  unsigned single_eval_kernel_byte_size = mppi::kernels::rmppi::calcEvalCombinedKernelSharedMemSize(\n      this->model_, this->cost_, this->sampler_, getNumEvalRollouts(), getNumEvalSamplesPerCandidate(),\n      this->params_.eval_dyn_kernel_dim_);\n\n  unsigned eval_dyn_kernel_byte_size = mppi::kernels::rmppi::calcEvalDynKernelSharedMemSize(\n      this->model_, this->sampler_, this->params_.eval_dyn_kernel_dim_);\n  unsigned eval_cost_kernel_byte_size = mppi::kernels::rmppi::calcEvalCostKernelSharedMemSize(\n      this->cost_, this->sampler_, getNumEvalRollouts(), getNumEvalSamplesPerCandidate(),\n      this->params_.eval_cost_kernel_dim_);\n\n  // Limit kernel choice to those that fit within shared memory constraints\n  bool eval_dyn_too_large = eval_dyn_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool eval_cost_too_large = eval_cost_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool eval_combined_too_large = single_eval_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool eval_set = false;\n\n  if (eval_combined_too_large && (eval_dyn_too_large || eval_cost_too_large))\n  {\n    std::string error_msg =\n        \"There is not enough shared memory on the GPU for either eval kernel option. The combined eval kernel \"\n        \"takes \" +\n        std::to_string(single_eval_kernel_byte_size) + \" bytes, the cost eval kernel takes \" +\n        std::to_string(eval_cost_kernel_byte_size) + \" bytes, the dynamics eval kernel takes \" +\n        std::to_string(eval_dyn_kernel_byte_size) + \" bytes, and the max is \" +\n        std::to_string(deviceProp.sharedMemPerBlock) +\n        \" bytes. 
Considering lowering the corresponding thread block sizes.\";\n    throw std::runtime_error(error_msg);\n  }\n  else if (eval_dyn_too_large || eval_cost_too_large)\n  {\n    this->setEvalKernelChoice(kernelType::USE_SINGLE_KERNEL);\n    eval_set = true;\n  }\n  else if (eval_combined_too_large)\n  {\n    this->setEvalKernelChoice(kernelType::USE_SPLIT_KERNELS);\n    eval_set = true;\n  }\n\n  // Send the nominal state candidates to the GPU\n  HANDLE_ERROR(cudaMemcpyAsync(importance_sampling_states_d_, candidate_nominal_states_.data(),\n                               sizeof(float) * DYN_T::STATE_DIM * getNumCandidates(), cudaMemcpyHostToDevice,\n                               this->stream_));\n\n  Eigen::MatrixXi temp_importance_sampler_strides = Eigen::MatrixXi::Ones(getNumCandidates(), 1);\n  // Send the importance sampler strides to the GPU\n  HANDLE_ERROR(cudaMemcpyAsync(importance_sampling_strides_d_, temp_importance_sampler_strides.data(),\n                               sizeof(int) * getNumCandidates(), cudaMemcpyHostToDevice, this->stream_));\n  // Send the nominal control to the GPU\n  copyNominalControlToDevice(false);\n  // Generate noise for the samples\n  this->sampler_->generateSamples(1, 0, this->gen_, true);\n\n  float single_eval_kernel_time_ms = std::numeric_limits<float>::infinity();\n  float split_eval_kernel_time_ms = std::numeric_limits<float>::infinity();\n\n  auto start_eval_split_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < this->getNumKernelEvaluations() && !eval_set; i++)\n  {\n    mppi::kernels::rmppi::launchSplitInitEvalKernel<DYN_T, COST_T, SAMPLING_T>(\n        this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), getNumEvalRollouts(),\n        this->getLambda(), this->getAlpha(), getNumEvalSamplesPerCandidate(), importance_sampling_strides_d_,\n        importance_sampling_states_d_, importance_sampling_outputs_d_, importance_sampling_costs_d_,\n        
this->params_.eval_dyn_kernel_dim_, this->params_.eval_cost_kernel_dim_, this->stream_, true);\n  }\n  auto end_eval_split_kernel_time = std::chrono::steady_clock::now();\n  auto start_eval_single_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < this->getNumKernelEvaluations() && !eval_set; i++)\n  {\n    mppi::kernels::rmppi::launchInitEvalKernel<DYN_T, COST_T, SAMPLING_T>(\n        this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), getNumEvalRollouts(),\n        this->getLambda(), this->getAlpha(), getNumEvalSamplesPerCandidate(), importance_sampling_strides_d_,\n        importance_sampling_states_d_, importance_sampling_costs_d_, this->params_.eval_dyn_kernel_dim_, this->stream_,\n        true);\n  }\n  auto end_eval_single_kernel_time = std::chrono::steady_clock::now();\n\n  if (!eval_set)\n  {\n    single_eval_kernel_time_ms = mppi::math::timeDiffms(end_eval_single_kernel_time, start_eval_single_kernel_time);\n    split_eval_kernel_time_ms = mppi::math::timeDiffms(end_eval_split_kernel_time, start_eval_split_kernel_time);\n  }\n\n  std::string kernel_choice = \"\";\n  if (split_eval_kernel_time_ms < single_eval_kernel_time_ms)\n  {\n    this->setEvalKernelChoice(kernelType::USE_SPLIT_KERNELS);\n    kernel_choice = \"split \";\n  }\n  else\n  {\n    this->setEvalKernelChoice(kernelType::USE_SINGLE_KERNEL);\n    kernel_choice = \"single\";\n  }\n\n  this->logger_->info(\n      \"Choosing %s eval kernel based on split taking %f ms and single taking %f ms after %d iterations\\n\",\n      kernel_choice.c_str(), split_eval_kernel_time_ms, single_eval_kernel_time_ms, this->getNumKernelEvaluations());\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::setParams(const PARAMS_T& p)\n{\n  bool empty_eval_dyn_size = p.eval_dyn_kernel_dim_.x == 0;\n  bool changed_sample_size = p.eval_dyn_kernel_dim_.x != this->params_.eval_dyn_kernel_dim_.x;\n  bool changed_num_candidates = p.num_candidate_nominal_states_ != 
this->params_.num_candidate_nominal_states_;\n  PARENT_CLASS::setParams(p);\n  this->params_.dynamics_rollout_dim_.z = max(2, p.dynamics_rollout_dim_.z);\n  this->params_.cost_rollout_dim_.z = max(2, p.cost_rollout_dim_.z);\n\n  // Set up cost eval kernel dimensions\n  if (p.eval_cost_kernel_dim_.x == 0)\n  {\n    this->params_.eval_cost_kernel_dim_.x = this->getNumTimesteps();\n  }\n\n  this->params_.eval_cost_kernel_dim_.y = max(1, p.eval_cost_kernel_dim_.y);\n  this->params_.eval_cost_kernel_dim_.z = max(1, p.eval_cost_kernel_dim_.z);\n\n  // Set up dynamics eval kernel dimensions\n  if (empty_eval_dyn_size)\n  {\n    this->params_.eval_dyn_kernel_dim_.x = 32;\n    changed_sample_size = true;\n  }\n  this->params_.eval_dyn_kernel_dim_.y = max(1, p.eval_dyn_kernel_dim_.y);\n  this->params_.eval_dyn_kernel_dim_.z = max(1, p.eval_dyn_kernel_dim_.z);\n\n  if (changed_sample_size)\n  {\n    updateCandidateMemory();\n  }\n  else if (changed_num_candidates)\n  {\n    updateNumCandidates(p.num_candidate_nominal_states_);\n  }\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::getInitNominalStateCandidates(const Eigen::Ref<const state_array>& nominal_x_k,\n                                               const Eigen::Ref<const state_array>& nominal_x_kp1,\n                                               const Eigen::Ref<const state_array>& real_x_kp1)\n{\n  Eigen::MatrixXf points(DYN_T::STATE_DIM, 3);\n  points << nominal_x_k, nominal_x_kp1, real_x_kp1;\n  auto candidates = points * line_search_weights_;\n  for (int i = 0; i < getNumCandidates(); ++i)\n  {\n    candidate_nominal_states_[i] = candidates.col(i);\n  }\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::resetCandidateCudaMem()\n{\n  deallocateNominalStateCandidateMemory();\n\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n  HANDLE_ERROR(\n      cudaMallocAsync((void**)&importance_sampling_costs_d_, sizeof(float) * getNumEvalRollouts(), this->stream_));\n  
HANDLE_ERROR(cudaMallocAsync((void**)&importance_sampling_outputs_d_,\n                               sizeof(float) * getNumEvalRollouts() * this->getNumTimesteps() * DYN_T::OUTPUT_DIM,\n                               this->stream_));\n  HANDLE_ERROR(cudaMallocAsync((void**)&importance_sampling_states_d_,\n                               sizeof(float) * DYN_T::STATE_DIM * getNumCandidates(), this->stream_));\n  HANDLE_ERROR(\n      cudaMallocAsync((void**)&importance_sampling_strides_d_, sizeof(int) * getNumCandidates(), this->stream_));\n#else\n  HANDLE_ERROR(cudaMalloc((void**)&importance_sampling_costs_d_, sizeof(float) * getNumEvalRollouts()));\n  HANDLE_ERROR(cudaMalloc((void**)&importance_sampling_outputs_d_,\n                          sizeof(float) * getNumEvalRollouts() * this->getNumTimesteps() * DYN_T::OUTPUT_DIM));\n  HANDLE_ERROR(\n      cudaMalloc((void**)&importance_sampling_states_d_, sizeof(float) * DYN_T::STATE_DIM * getNumCandidates()));\n  HANDLE_ERROR(cudaMalloc((void**)&importance_sampling_strides_d_, sizeof(int) * getNumCandidates()));\n#endif\n  // Set flag so that the we know cudamemory is allocated\n  importance_sampling_cuda_mem_init_ = true;\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::deallocateNominalStateCandidateMemory()\n{\n  if (importance_sampling_cuda_mem_init_)\n  {\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n    HANDLE_ERROR(cudaFreeAsync(importance_sampling_costs_d_, this->stream_));\n    HANDLE_ERROR(cudaFreeAsync(importance_sampling_outputs_d_, this->stream_));\n    HANDLE_ERROR(cudaFreeAsync(importance_sampling_states_d_, this->stream_));\n    HANDLE_ERROR(cudaFreeAsync(importance_sampling_strides_d_, this->stream_));\n#else\n    HANDLE_ERROR(cudaFree(importance_sampling_costs_d_));\n    HANDLE_ERROR(cudaFree(importance_sampling_outputs_d_));\n    HANDLE_ERROR(cudaFree(importance_sampling_states_d_));\n    HANDLE_ERROR(cudaFree(importance_sampling_strides_d_));\n#endif\n    importance_sampling_costs_d_ = 
nullptr;\n    importance_sampling_outputs_d_ = nullptr;\n    importance_sampling_states_d_ = nullptr;\n    importance_sampling_strides_d_ = nullptr;\n\n    // Set flag so that we know cudamemory has been freed\n    importance_sampling_cuda_mem_init_ = false;\n  }\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::copyNominalControlToDevice(bool synchronize)\n{\n  this->sampler_->copyImportanceSamplerToDevice(nominal_control_trajectory_.data(), 0, synchronize);\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::updateNumCandidates(int new_num_candidates)\n{\n  if ((new_num_candidates * getNumEvalSamplesPerCandidate()) > NUM_ROLLOUTS)\n  {\n    std::cerr << \"ERROR: (number of candidates) * (SAMPLES_PER_CANDIDATE) cannot exceed NUM_ROLLOUTS\\n\";\n    std::cerr << \"number of candidates: \" << new_num_candidates\n              << \", SAMPLES_PER_CANDIDATE: \" << getNumEvalSamplesPerCandidate() << \", NUM_ROLLOUTS: \" << NUM_ROLLOUTS\n              << \"\\n\";\n    std::terminate();\n  }\n\n  // New number must be odd and greater than 3\n  if (new_num_candidates < 3)\n  {\n    std::cerr << \"ERROR: number of candidates must be greater or equal to 3\\n\";\n    std::cerr << \"number of candidates: \" << new_num_candidates << \"\\n\";\n    std::terminate();\n  }\n  if (new_num_candidates % 2 == 0)\n  {\n    std::cerr << \"ERROR: number of candidates must be odd\\n\";\n    std::cerr << \"number of candidates: \" << new_num_candidates << \"\\n\";\n    std::terminate();\n  }\n\n  // Set the new value of the number of candidates\n  setNumCandidates(new_num_candidates);\n\n  updateCandidateMemory();\n\n  // Recompute the line search weights based on the number of candidates\n  computeLineSearchWeights();\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::updateCandidateMemory()\n{\n  // Resize the vector holding the candidate nominal states\n  candidate_nominal_states_.resize(getNumCandidates());\n\n  // Resize the matrix holding the importance sampler strides\n  
importance_sampler_strides_.resize(getNumCandidates(), 1);\n\n  // Resize the trajectory costs matrix\n  candidate_trajectory_costs_.resize(getNumEvalRollouts(), 1);\n  candidate_trajectory_costs_.setZero();\n\n  // Resize the free energy costs matrix\n  candidate_free_energy_.resize(getNumCandidates(), 1);\n  candidate_free_energy_.setZero();\n\n  // Deallocate and reallocate cuda memory\n  resetCandidateCudaMem();\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::computeLineSearchWeights()\n{\n  line_search_weights_.resize(3, getNumCandidates());\n\n  // For a given setup, this never changes.... why recompute every time?\n  int num_candid_over_2 = getNumCandidates() / 2;\n  for (int i = 0; i < num_candid_over_2 + 1; i++)\n  {\n    line_search_weights_(0, i) = 1 - i / float(num_candid_over_2);\n    line_search_weights_(1, i) = i / float(num_candid_over_2);\n    line_search_weights_(2, i) = 0.0;\n  }\n  for (int i = 1; i < num_candid_over_2 + 1; i++)\n  {\n    line_search_weights_(0, num_candid_over_2 + i) = 0.0;\n    line_search_weights_(1, num_candid_over_2 + i) = 1 - i / float(num_candid_over_2);\n    line_search_weights_(2, num_candid_over_2 + i) = i / float(num_candid_over_2);\n  }\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::computeImportanceSamplerStride(int stride)\n{\n  Eigen::MatrixXf stride_vec(1, 3);\n  stride_vec << 0, stride, stride;\n\n  // Perform matrix multiplication, convert to array so that we can round the floats to the nearest\n  // integer. Then cast the resultant float array to an int array. 
Then set equal to our int matrix.\n  importance_sampler_strides_ = (stride_vec * line_search_weights_).array().round().template cast<int>();\n  // importance_sampler_strides_.array() += stride;\n}\n\nROBUST_MPPI_TEMPLATE\nfloat RobustMPPI::computeCandidateBaseline()\n{\n  float baseline = candidate_trajectory_costs_(0);\n  for (int i = 1; i < getNumEvalRollouts(); i++)\n  {  // TODO What is the reasoning behind only using the first condition to get the baseline?\n    if (candidate_trajectory_costs_(i) < baseline)\n    {\n      baseline = candidate_trajectory_costs_(i);\n    }\n  }\n  return baseline;\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::computeBestIndex()\n{\n  candidate_free_energy_.setZero();\n  float baseline = computeCandidateBaseline();\n  for (int i = 0; i < getNumCandidates(); i++)\n  {\n    for (int j = 0; j < getNumEvalSamplesPerCandidate(); j++)\n    {\n      candidate_free_energy_(i) += expf(\n          -1.0 / this->getLambda() * (candidate_trajectory_costs_(i * getNumEvalSamplesPerCandidate() + j) - baseline));\n    }\n    candidate_free_energy_(i) /= (1.0 * getNumEvalSamplesPerCandidate());\n    candidate_free_energy_(i) = -this->getLambda() * logf(candidate_free_energy_(i)) + baseline;\n    if (candidate_free_energy_(i) < getValueFunctionThreshold())\n    {\n      best_index_ = i;\n    }\n  }\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::updateImportanceSamplingControl(const Eigen::Ref<const state_array>& state, int stride)\n{\n  // (Controller Frequency)*(Optimization Time) corresponds to how many timesteps occurred in the last optimization\n  real_stride_ = stride;\n\n  computeNominalStateAndStride(state, stride);  // Launches the init eval kernel\n\n  // Save the nominal control history for the importance sampler\n  this->saveControlHistoryHelper(nominal_stride_, nominal_control_trajectory_, nominal_control_history_);\n\n  // Save the real control history for the optimal control\n  this->saveControlHistoryHelper(real_stride_, this->control_, 
this->control_history_);\n\n  // Slide the control sequence for the nominal control trajectory\n  this->slideControlSequenceHelper(nominal_stride_, nominal_control_trajectory_);\n\n  // Compute the nominal trajectory because we have slid the control sequence and updated the nominal state\n  this->computeStateTrajectoryHelper(nominal_state_trajectory_, nominal_state_, nominal_control_trajectory_);\n  // Compute the feedback gains and save them to an array\n  computeNominalFeedbackGains(state);\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::computeNominalStateAndStride(const Eigen::Ref<const state_array>& state, int stride)\n{\n  if (!nominal_state_init_)\n  {\n    nominal_state_ = state;\n    nominal_state_init_ = true;\n    nominal_stride_ = 0;\n  }\n  else\n  {\n    getInitNominalStateCandidates(nominal_state_trajectory_.col(0), nominal_state_trajectory_.col(1), state);\n    computeImportanceSamplerStride(stride);\n\n    // Send the nominal state candidates to the GPU\n    HANDLE_ERROR(cudaMemcpyAsync(importance_sampling_states_d_, candidate_nominal_states_.data(),\n                                 sizeof(float) * DYN_T::STATE_DIM * getNumCandidates(), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    // Send the importance sampler strides to the GPU\n    HANDLE_ERROR(cudaMemcpyAsync(importance_sampling_strides_d_, importance_sampler_strides_.data(),\n                                 sizeof(int) * getNumCandidates(), cudaMemcpyHostToDevice, this->stream_));\n    // Send the nominal control to the GPU\n    copyNominalControlToDevice(false);\n\n    // Generate noise for the samples\n    this->sampler_->generateSamples(stride, 0, this->gen_, false);\n\n    // Launch the init eval kernel\n    if (this->getEvalKernelChoiceAsEnum() == kernelType::USE_SPLIT_KERNELS)\n    {\n      mppi::kernels::rmppi::launchSplitInitEvalKernel<DYN_T, COST_T, SAMPLING_T>(\n          this->model_, this->cost_, this->sampler_, this->getDt(), 
this->getNumTimesteps(), getNumEvalRollouts(),\n          this->getLambda(), this->getAlpha(), getNumEvalSamplesPerCandidate(), importance_sampling_strides_d_,\n          importance_sampling_states_d_, importance_sampling_outputs_d_, importance_sampling_costs_d_,\n          this->params_.eval_dyn_kernel_dim_, this->params_.eval_cost_kernel_dim_, this->stream_, false);\n    }\n    else if (this->getEvalKernelChoiceAsEnum() == kernelType::USE_SINGLE_KERNEL)\n    {\n      mppi::kernels::rmppi::launchInitEvalKernel<DYN_T, COST_T, SAMPLING_T>(\n          this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), getNumEvalRollouts(),\n          this->getLambda(), this->getAlpha(), getNumEvalSamplesPerCandidate(), importance_sampling_strides_d_,\n          importance_sampling_states_d_, importance_sampling_costs_d_, this->params_.eval_dyn_kernel_dim_,\n          this->stream_, false);\n    }\n\n    HANDLE_ERROR(cudaMemcpyAsync(candidate_trajectory_costs_.data(), importance_sampling_costs_d_,\n                                 sizeof(float) * getNumEvalRollouts(), cudaMemcpyDeviceToHost, this->stream_));\n    cudaStreamSynchronize(this->stream_);\n\n    // Compute the best nominal state candidate from the rollouts\n    computeBestIndex();\n    //    best_index_ = 8;\n    this->free_energy_statistics_.nominal_state_used = best_index_;\n    nominal_stride_ = importance_sampler_strides_(best_index_);\n    nominal_state_ = candidate_nominal_states_[best_index_];\n  }\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::computeNominalFeedbackGains(const Eigen::Ref<const state_array>& state)\n{\n  this->computeFeedbackHelper(state, nominal_state_trajectory_, nominal_control_trajectory_);\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::computeControl(const Eigen::Ref<const state_array>& state, int optimization_stride)\n{\n  // Handy dandy pointers to nominal data\n  float* trajectory_costs_nominal_d = this->trajectory_costs_d_;\n  float* trajectory_costs_real_d = 
this->trajectory_costs_d_ + NUM_ROLLOUTS;\n  float* initial_state_nominal_d = this->initial_state_d_;\n  float* initial_state_real_d = this->initial_state_d_ + DYN_T::STATE_DIM;\n\n  this->free_energy_statistics_.nominal_sys.previousBaseline = this->getBaselineCost(0);\n  this->free_energy_statistics_.real_sys.previousBaseline = this->getBaselineCost(1);\n\n  // Transfer the feedback gains to the GPU\n  this->fb_controller_->copyToDevice(false);\n\n  // Transfer the real initial state to the GPU\n  HANDLE_ERROR(cudaMemcpyAsync(initial_state_real_d, state.data(), sizeof(float) * DYN_T::STATE_DIM,\n                               cudaMemcpyHostToDevice, this->stream_));\n  // Transfer the nominal state to the GPU: recall that the device GPU has the augmented state [nominal state, real\n  // state]\n  HANDLE_ERROR(cudaMemcpyAsync(initial_state_nominal_d, nominal_state_.data(), sizeof(float) * DYN_T::STATE_DIM,\n                               cudaMemcpyHostToDevice, this->stream_));\n\n  for (int opt_iter = 0; opt_iter < this->getNumIters(); opt_iter++)\n  {\n    // Copy the importance sampling control to the system\n    this->sampler_->copyImportanceSamplerToDevice(nominal_control_trajectory_.data(), 0, false);\n    this->sampler_->copyImportanceSamplerToDevice(nominal_control_trajectory_.data(), 1, false);\n\n    // Generate a the control perturbations for exploration\n    this->sampler_->generateSamples(optimization_stride, opt_iter, this->gen_, false);\n\n    // Launch the rollout kernel\n    if (this->getKernelChoiceAsEnum() == kernelType::USE_SPLIT_KERNELS)\n    {\n      mppi::kernels::rmppi::launchSplitRMPPIRolloutKernel<DYN_T, COST_T, SAMPLING_T, FEEDBACK_GPU>(\n          this->model_, this->cost_, this->sampler_, this->fb_controller_->getHostPointer().get(), this->getDt(),\n          this->getNumTimesteps(), NUM_ROLLOUTS, this->getLambda(), this->getAlpha(), getValueFunctionThreshold(),\n          this->initial_state_d_, this->output_d_, 
this->trajectory_costs_d_, this->params_.dynamics_rollout_dim_,\n          this->params_.cost_rollout_dim_, this->stream_, false);\n    }\n    else\n    {\n      mppi::kernels::rmppi::launchRMPPIRolloutKernel<DYN_T, COST_T, SAMPLING_T, FEEDBACK_GPU>(\n          this->model_, this->cost_, this->sampler_, this->fb_controller_->getHostPointer().get(), this->getDt(),\n          this->getNumTimesteps(), NUM_ROLLOUTS, this->getLambda(), this->getAlpha(), getValueFunctionThreshold(),\n          this->initial_state_d_, this->trajectory_costs_d_, this->params_.dynamics_rollout_dim_, this->stream_, false);\n    }\n\n    // Return the costs ->  nominal,  real costs\n    HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), trajectory_costs_real_d, NUM_ROLLOUTS * sizeof(float),\n                                 cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_nominal_.data(), trajectory_costs_nominal_d,\n                                 NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n    // Launch the norm exponential kernels for the nominal costs and the real costs\n    this->setBaseline(mppi::kernels::computeBaselineCost(trajectory_costs_nominal_.data(), NUM_ROLLOUTS), 0);\n    this->setBaseline(mppi::kernels::computeBaselineCost(this->trajectory_costs_.data(), NUM_ROLLOUTS), 1);\n\n    // In this case this->gamma = 1 / lambda\n    mppi::kernels::launchNormExpKernel(NUM_ROLLOUTS, this->getNormExpThreads(), trajectory_costs_nominal_d,\n                                       1.0 / this->getLambda(), this->getBaselineCost(0), this->stream_, false);\n    mppi::kernels::launchNormExpKernel(NUM_ROLLOUTS, this->getNormExpThreads(), trajectory_costs_real_d,\n                                       1.0 / this->getLambda(), this->getBaselineCost(1), this->stream_, false);\n\n    HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), 
trajectory_costs_real_d, NUM_ROLLOUTS * sizeof(float),\n                                 cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_nominal_.data(), trajectory_costs_nominal_d,\n                                 NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n    // Launch the weighted reduction kernel for the nominal costs and the real costs\n    this->setNormalizer(mppi::kernels::computeNormalizer(trajectory_costs_nominal_.data(), NUM_ROLLOUTS), 0);\n    this->setNormalizer(mppi::kernels::computeNormalizer(this->trajectory_costs_.data(), NUM_ROLLOUTS), 1);\n\n    // Compute real free energy\n    mppi::kernels::computeFreeEnergy(this->free_energy_statistics_.real_sys.freeEnergyMean,\n                                     this->free_energy_statistics_.real_sys.freeEnergyVariance,\n                                     this->free_energy_statistics_.real_sys.freeEnergyModifiedVariance,\n                                     this->trajectory_costs_.data(), NUM_ROLLOUTS, this->getBaselineCost(1),\n                                     this->getLambda());\n\n    // Compute Nominal State free Energy\n    mppi::kernels::computeFreeEnergy(this->free_energy_statistics_.nominal_sys.freeEnergyMean,\n                                     this->free_energy_statistics_.nominal_sys.freeEnergyVariance,\n                                     this->free_energy_statistics_.nominal_sys.freeEnergyModifiedVariance,\n                                     this->trajectory_costs_nominal_.data(), NUM_ROLLOUTS, this->getBaselineCost(0),\n                                     this->getLambda());\n\n    // Calculate new optimal trajectories\n    this->sampler_->updateDistributionParamsFromDevice(trajectory_costs_nominal_d, this->getNormalizerCost(0), 0,\n                                                       false);\n    
this->sampler_->updateDistributionParamsFromDevice(trajectory_costs_real_d, this->getNormalizerCost(1), 1, false);\n\n    // Transfer the new control to the host\n    this->sampler_->setHostOptimalControlSequence(nominal_control_trajectory_.data(), 0, false);\n    this->sampler_->setHostOptimalControlSequence(this->control_.data(), 1, true);\n  }\n  // Smooth the control\n  this->smoothControlTrajectoryHelper(this->control_, this->control_history_);\n  this->smoothControlTrajectoryHelper(nominal_control_trajectory_, nominal_control_history_);\n\n  // Compute the nominal trajectory because we updated the nominal control!\n  this->computeStateTrajectoryHelper(nominal_state_trajectory_, nominal_state_, nominal_control_trajectory_);\n\n  this->free_energy_statistics_.real_sys.normalizerPercent = this->getNormalizerCost(1) / NUM_ROLLOUTS;\n  this->free_energy_statistics_.real_sys.increase =\n      this->getBaselineCost(1) - this->free_energy_statistics_.real_sys.previousBaseline;\n  this->free_energy_statistics_.nominal_sys.normalizerPercent = this->getNormalizerCost(0) / NUM_ROLLOUTS;\n  this->free_energy_statistics_.nominal_sys.increase =\n      this->getBaselineCost(0) - this->free_energy_statistics_.nominal_sys.previousBaseline;\n\n  // Copy back sampled trajectories\n  this->copySampledControlFromDevice(false);\n  if (this->getKernelChoiceAsEnum() == kernelType::USE_SINGLE_KERNEL)\n  {  // copy initial state to vis initial state for use with visualizeKernel\n    HANDLE_ERROR(cudaMemcpyAsync(this->vis_initial_state_d_, this->initial_state_d_,\n                                 sizeof(float) * DYN_T::STATE_DIM * 2, cudaMemcpyDeviceToDevice, this->vis_stream_));\n  }\n  this->copyTopControlFromDevice(true);\n}\n\nROBUST_MPPI_TEMPLATE\nfloat RobustMPPI::computeDF()\n{\n  return (this->getFeedbackPropagatedStateSeq().col(0) - this->getFeedbackPropagatedStateSeq().col(1)).norm() +\n         (this->getTargetStateSeq().col(0) - 
this->getFeedbackPropagatedStateSeq().col(0)).norm();\n}\n\nROBUST_MPPI_TEMPLATE\nvoid RobustMPPI::calculateSampledStateTrajectories()\n{\n  int num_sampled_trajectories = this->getTotalSampledTrajectories();\n\n  // control already copied in compute control, so run kernel\n  if (this->getKernelChoiceAsEnum() == kernelType::USE_SPLIT_KERNELS)\n  {\n    mppi::kernels::launchVisualizeCostKernel<COST_T, SAMPLING_T>(\n        this->cost_->cost_d_, this->sampler_->sampling_d_, this->getDt(), this->getNumTimesteps(),\n        num_sampled_trajectories, this->getLambda(), this->getAlpha(), this->sampled_outputs_d_,\n        this->sampled_crash_status_d_, this->sampled_costs_d_, this->params_.cost_rollout_dim_, this->stream_, false);\n  }\n  else if (this->getKernelChoiceAsEnum() == kernelType::USE_SINGLE_KERNEL)\n  {\n    mppi::kernels::launchVisualizeKernel<DYN_T, COST_T, SAMPLING_T>(\n        this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), num_sampled_trajectories,\n        this->getLambda(), this->getAlpha(), this->vis_initial_state_d_, this->sampled_outputs_d_,\n        this->sampled_costs_d_, this->sampled_crash_status_d_, this->params_.visualize_dim_, this->stream_, false);\n  }\n\n  // copy back results\n  for (int i = 0; i < num_sampled_trajectories * 2; i++)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_trajectories_[i].data(),\n                                 this->sampled_outputs_d_ + i * this->getNumTimesteps() * DYN_T::OUTPUT_DIM,\n                                 this->getNumTimesteps() * DYN_T::OUTPUT_DIM * sizeof(float), cudaMemcpyDeviceToHost,\n                                 this->vis_stream_));\n  }\n  HANDLE_ERROR(cudaMemcpyAsync(this->sampled_costs_.data(), this->sampled_costs_d_,\n                               this->getNumTimesteps() * 2 * sizeof(float), cudaMemcpyDeviceToHost, this->vis_stream_));\n  HANDLE_ERROR(cudaMemcpyAsync(this->sampled_crash_status_.data(), this->sampled_crash_status_d_,\n           
                    this->getNumTimesteps() * 2 * sizeof(float), cudaMemcpyDeviceToHost, this->vis_stream_));\n  HANDLE_ERROR(cudaStreamSynchronize(this->vis_stream_));\n}\n"
  },
  {
    "path": "include/mppi/controllers/R-MPPI/robust_mppi_controller.cuh",
    "content": "/*\n * Software License Agreement (BSD License)\n * Copyright (c) 2013, Georgia Institute of Technology\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * 1. Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n/**********************************************\n * @file mppi_controller.cuh\n * @author Grady Williams <gradyrw@gmail.com>\n * @date May 24, 2017\n * @copyright 2017 Georgia Institute of Technology\n * @brief Class definition for the MPPI controller.\n ***********************************************/\n\n#ifndef MPPI_GEN_RMPPI_CONTROLLER_CUH_\n#define MPPI_GEN_RMPPI_CONTROLLER_CUH_\n\n#include <curand.h>\n#include <mppi/controllers/controller.cuh>\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n#include 
<mppi/core/rmppi_kernels.cuh>\n\n// Needed for list of candidate states\n#include <mppi/ddp/util.h>\n\ntemplate <int S_DIM, int C_DIM, int MAX_TIMESTEPS>\nstruct RobustMPPIParams : public ControllerParams<S_DIM, C_DIM, MAX_TIMESTEPS>\n{\n  float value_function_threshold_ = 1000.0;\n  int optimization_stride_ = 1;\n  int num_candidate_nominal_states_ = 9;\n  dim3 eval_cost_kernel_dim_;\n  dim3 eval_dyn_kernel_dim_;\n};\n\n// template <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS = 2560, int BDIM_X = 64,\n//           int BDIM_Y = 1, class PARAMS_T = RobustMPPIParams<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM, MAX_TIMESTEPS>,\n//           int SAMPLES_PER_CONDITION_MULTIPLIER = 1>\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS = 2560,\n          class SAMPLING_T = ::mppi::sampling_distributions::GaussianDistribution<typename DYN_T::DYN_PARAMS_T>,\n          class PARAMS_T = RobustMPPIParams<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM, MAX_TIMESTEPS>>\nclass RobustMPPIController : public Controller<DYN_T, COST_T, FB_T, SAMPLING_T, MAX_TIMESTEPS, NUM_ROLLOUTS, PARAMS_T>\n{\npublic:\n  /**\n   * Set up useful types\n   */\n  typedef Controller<DYN_T, COST_T, FB_T, SAMPLING_T, MAX_TIMESTEPS, NUM_ROLLOUTS, PARAMS_T> PARENT_CLASS;\n\n  using control_array = typename PARENT_CLASS::control_array;\n  using control_trajectory = typename PARENT_CLASS::control_trajectory;\n  using state_trajectory = typename PARENT_CLASS::state_trajectory;\n  using state_array = typename PARENT_CLASS::state_array;\n  using sampled_cost_traj = typename PARENT_CLASS::sampled_cost_traj;\n  using FEEDBACK_STATE = typename PARENT_CLASS::TEMPLATED_FEEDBACK_STATE;\n  using FEEDBACK_PARAMS = typename PARENT_CLASS::TEMPLATED_FEEDBACK_PARAMS;\n  using FEEDBACK_GPU = typename PARENT_CLASS::TEMPLATED_FEEDBACK_GPU;\n  using NominalCandidateVector = typename util::NamedEigenAlignedVector<state_array>;\n\n  static const int STATE_DIM = DYN_T::STATE_DIM;\n 
 static const int CONTROL_DIM = DYN_T::CONTROL_DIM;\n\n  // Number of samples per condition must be a multiple of the blockDIM\n  // static const int SAMPLES_PER_CANDIDATE = SAMPLES_PER_CANDIDATE_MULTIPLIER;\n\n  int getNumEvalSamplesPerCandidate() const\n  {\n    return this->params_.eval_dyn_kernel_dim_.x;\n  }\n\n  // float value_function_threshold_ = 1000.0;\n\n  state_array nominal_state_ = state_array::Zero();\n\n  /**\n   * @brief Constructor for mppi controller class\n   * @param model A basis function model of the system dynamics.\n   * @param cost An MppiCost object.\n   * @param dt The time increment. horizon = num_timesteps*dt\n   * @param max_iter number of times to repeat control sequence calculations\n   * @param gamma\n   * @param num_timesteps The number of timesteps to look ahead for.\n   * TODO Finish this description\n   */\n  RobustMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, float dt, int max_iter,\n                       float lambda, float alpha, float value_function_threshold, int num_timesteps = MAX_TIMESTEPS,\n                       const Eigen::Ref<const control_trajectory>& init_control_traj = control_trajectory::Zero(),\n                       int num_candidate_nominal_states = 9, int optimization_stride = 1,\n                       cudaStream_t stream = nullptr);\n\n  RobustMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, PARAMS_T& params,\n                       cudaStream_t stream = nullptr);\n\n  /**\n   * @brief Destructor for mppi controller class.\n   */\n  ~RobustMPPIController();\n\n  std::string getControllerName()\n  {\n    return \"Robust MPPI\";\n  };\n\n  // Initializes the num_candidates, candidate_nominal_states, linesearch_weights,\n  // and allocates the associated CUDA memory\n  void updateNumCandidates(int new_num_candidates);\n\n  // Update the importance sampler prior to calling computeControl\n  void updateImportanceSamplingControl(const 
Eigen::Ref<const state_array>& state, int stride);\n\n  /**\n   * @brief Compute the control given the current state of the system.\n   * @param state The current state of the autorally system.\n   */\n  void computeControl(const Eigen::Ref<const state_array>& state, int optimization_stride = 1);\n\n  control_trajectory getControlSeq() const override\n  {\n    return this->control_;\n  };\n\n  state_trajectory getTargetStateSeq() const override\n  {\n    return nominal_state_trajectory_;\n  };\n\n  state_array getNominalState() const\n  {\n    return nominal_state_;\n  }\n\n  int getBestIndex() const\n  {\n    return best_index_;\n  };\n\n  float getValueFunctionThreshold() const\n  {\n    return this->params_.value_function_threshold_;\n  }\n\n  int getOptimizationStride() const\n  {\n    return this->params_.optimization_stride_;\n  }\n\n  int getNumCandidates() const\n  {\n    return this->params_.num_candidate_nominal_states_;\n  }\n\n  int getNumEvalRollouts() const\n  {\n    return getNumCandidates() * getNumEvalSamplesPerCandidate();\n  }\n\n  int getEvalKernelChoiceAsInt() const\n  {\n    return static_cast<int>(use_eval_kernel_);\n  }\n\n  kernelType getEvalKernelChoiceAsEnum() const\n  {\n    return use_eval_kernel_;\n  }\n\n  // Does nothing. This reason is because the control sliding happens during the importance sampler update.\n  // The control applied to the real system (during the MPPI rollouts) is the nominal control (which slides\n  // during the importance sampler update), plus the feedback term. Inside the runControlIteration function\n  // slideControl sequence is called prior to optimization, after the importance sampler update.\n  void slideControlSequence(int steps) override{};\n\n  // Feedback gain computation is done after the importance sampling update. The nominal trajectory computed\n  // during the importance sampling update does not change after the optimization, thus the feedback gains will\n  // not change either. 
In the current implementation of runControlIteration, the compute feedback gains is called\n  // after the computation of the optimal control.\n  void computeFeedback(const Eigen::Ref<const state_array>& state) override{};\n\n  Eigen::MatrixXf getCandidateFreeEnergy()\n  {\n    return candidate_free_energy_;\n  };\n\n  float computeDF();\n\n  void setPercentageSampledControlTrajectories(float new_perc)\n  {\n    this->setPercentageSampledControlTrajectoriesHelper(new_perc, 2);\n  }\n\n  void setValueFunctionThreshold(float value_function_threshold)\n  {\n    this->params_.value_function_threshold_ = value_function_threshold;\n  }\n\n  void setOptimizationStride(float optimization_stride)\n  {\n    this->params_.optimization_stride_ = optimization_stride;\n  }\n\n  void setNumCandidates(int num_candidate_nominal_states)\n  {\n    this->params_.num_candidate_nominal_states_ = num_candidate_nominal_states;\n  }\n\n  void setNumSamplesPerCandidate(const int& num_samples)\n  {\n    this->params_.eval_dyn_kernel_dim_.x = num_samples;\n    updateCandidateMemory();\n  }\n\n  void setEvalKernelChoice(const int& kernel_type)\n  {\n    use_eval_kernel_ = static_cast<kernelType>(kernel_type);\n  }\n\n  void setEvalKernelChoice(const kernelType& kernel_type)\n  {\n    use_eval_kernel_ = kernel_type;\n  }\n\n  void setParams(const PARAMS_T& p);\n\n  void calculateSampledStateTrajectories() override;\n\n  void chooseAppropriateKernel() override;\n\n  void chooseAppropriateEvalKernel();\n\nprotected:\n  bool importance_sampling_cuda_mem_init_ = false;\n  // int num_candidate_nominal_states_;\n  int best_index_ = 0;  // Selected nominal state candidate\n  // int optimization_stride_;  // Number of timesteps to apply the optimal control (== 1 for true MPC)\n  int nominal_stride_ = 0;  // Stride for the chosen nominal state of the importance sampler\n  int real_stride_ = 0;     // Stride for the optimal controller sliding\n  bool nominal_state_init_ = false;\n\n  // Free energy 
variables\n  float nominal_free_energy_mean_ = 0;\n  float nominal_free_energy_variance_ = 0;\n  float nominal_free_energy_modified_variance_ = 0;\n\n  // Storage classes\n  control_trajectory nominal_control_trajectory_ = control_trajectory::Zero();\n  state_trajectory nominal_state_trajectory_ = state_trajectory::Zero();\n  sampled_cost_traj trajectory_costs_nominal_ = sampled_cost_traj::Zero();\n\n  // Make the control history size flexible, related to issue #30\n  Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2> nominal_control_history_;  // History used for nominal_state IS\n\n  NominalCandidateVector candidate_nominal_states_ = { state_array::Zero() };\n  Eigen::MatrixXf line_search_weights_;         // At minimum there must be 3 candidates\n  Eigen::MatrixXi importance_sampler_strides_;  // Time index where control trajectory starts for each nominal state\n                                                // candidate\n  Eigen::MatrixXf candidate_trajectory_costs_;\n  Eigen::MatrixXf candidate_free_energy_;\n\n  void allocateCUDAMemory();\n\n  void copyNominalControlToDevice(bool synchronize = true);\n\n  void deallocateNominalStateCandidateMemory();\n\n  void updateCandidateMemory();\n\n  void resetCandidateCudaMem();\n\n  void getInitNominalStateCandidates(const Eigen::Ref<const state_array>& nominal_x_k,\n                                     const Eigen::Ref<const state_array>& nominal_x_kp1,\n                                     const Eigen::Ref<const state_array>& real_x_kp1);\n\n  // compute the line search weights\n  void computeLineSearchWeights();\n\n  void computeNominalStateAndStride(const Eigen::Ref<const state_array>& state, int stride);\n\n  // compute the importance sampler strides\n  void computeImportanceSamplerStride(int stride);\n\n  // Compute the baseline of the candidates\n  float computeCandidateBaseline();\n\n  // Get the best index based on the candidate free energy\n  void computeBestIndex();\n\n  // Computes and saves the feedback gains 
used in the rollout kernel and tracking.\n  virtual void computeNominalFeedbackGains(const Eigen::Ref<const state_array>& state);\n\n  // CUDA Memory\n  float* importance_sampling_costs_d_ = nullptr;\n  float* importance_sampling_outputs_d_ = nullptr;\n  float* importance_sampling_states_d_ = nullptr;\n  int* importance_sampling_strides_d_ = nullptr;\n  float* feedback_gain_array_d_ = nullptr;\n  kernelType use_eval_kernel_ = kernelType::USE_SPLIT_KERNELS;\n};\n\n#if __CUDACC__\n#include \"robust_mppi_controller.cu\"\n#endif\n\n#endif /* MPPI_GEN_RMPPI_CONTROLLER_CUH_ */\n"
  },
  {
    "path": "include/mppi/controllers/Tube-MPPI/tube_mppi_controller.cu",
    "content": "#include \"tube_mppi_controller.cuh\"\n#include <mppi/core/mppi_common.cuh>\n\n#define TUBE_MPPI_TEMPLATE                                                                                             \\\n  template <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS, class SAMPLING_T,              \\\n            class PARAMS_T>\n\n#define TubeMPPI TubeMPPIController<DYN_T, COST_T, FB_T, MAX_TIMESTEPS, NUM_ROLLOUTS, SAMPLING_T, PARAMS_T>\n\nTUBE_MPPI_TEMPLATE\nTubeMPPI::TubeMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, float dt,\n                             int max_iter, float lambda, float alpha, int num_timesteps,\n                             const Eigen::Ref<const control_trajectory>& init_control_traj, cudaStream_t stream)\n  : PARENT_CLASS(model, cost, fb_controller, sampler, dt, max_iter, lambda, alpha, num_timesteps, init_control_traj,\n                 stream)\n{\n  nominal_control_trajectory_ = init_control_traj;\n\n  // call rollout kernel with z = 2 since we have a nominal state\n  this->params_.dynamics_rollout_dim_.z = max(2, this->params_.dynamics_rollout_dim_.z);\n  this->params_.cost_rollout_dim_.z = max(2, this->params_.cost_rollout_dim_.z);\n  this->sampler_->setNumDistributions(2);\n\n  // Allocate CUDA memory for the controller\n  allocateCUDAMemory();\n\n  // Initialize Feedback\n  this->fb_controller_->initTrackingController();\n  this->enable_feedback_ = true;\n  chooseAppropriateKernel();\n}\n\nTUBE_MPPI_TEMPLATE\nTubeMPPI::TubeMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, PARAMS_T& params,\n                             cudaStream_t stream)\n  : PARENT_CLASS(model, cost, fb_controller, sampler, params, stream)\n{\n  nominal_control_trajectory_ = this->params_.init_control_traj_;\n\n  // call rollout kernel with z = 2 since we have a nominal state\n  this->params_.dynamics_rollout_dim_.z = max(2, 
this->params_.dynamics_rollout_dim_.z);\n  this->params_.cost_rollout_dim_.z = max(2, this->params_.cost_rollout_dim_.z);\n  this->sampler_->setNumDistributions(2);\n\n  // Allocate CUDA memory for the controller\n  allocateCUDAMemory();\n\n  // Initialize Feedback\n  this->fb_controller_->initTrackingController();\n  this->enable_feedback_ = true;\n  chooseAppropriateKernel();\n}\n\nTUBE_MPPI_TEMPLATE\nvoid TubeMPPI::chooseAppropriateKernel()\n{\n  cudaDeviceProp deviceProp;\n  HANDLE_ERROR(cudaGetDeviceProperties(&deviceProp, 0));\n  unsigned single_kernel_byte_size = mppi::kernels::calcRolloutCombinedKernelSharedMemSize(\n      this->model_, this->cost_, this->sampler_, this->params_.dynamics_rollout_dim_);\n  unsigned split_dyn_kernel_byte_size = mppi::kernels::calcRolloutDynamicsKernelSharedMemSize(\n      this->model_, this->sampler_, this->params_.dynamics_rollout_dim_);\n  unsigned split_cost_kernel_byte_size =\n      mppi::kernels::calcRolloutCostKernelSharedMemSize(this->cost_, this->sampler_, this->params_.cost_rollout_dim_);\n  unsigned vis_single_kernel_byte_size = mppi::kernels::calcVisualizeKernelSharedMemSize(\n      this->model_, this->cost_, this->sampler_, this->getNumTimesteps(), this->params_.visualize_dim_);\n\n  bool too_much_mem_single_kernel = single_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool too_much_mem_vis_kernel = vis_single_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  bool too_much_mem_split_kernel = split_dyn_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  too_much_mem_split_kernel = too_much_mem_split_kernel || split_cost_kernel_byte_size > deviceProp.sharedMemPerBlock;\n  too_much_mem_single_kernel = too_much_mem_single_kernel || too_much_mem_vis_kernel;\n\n  if (too_much_mem_split_kernel && too_much_mem_single_kernel)\n  {\n    std::string error_msg =\n        \"There is not enough shared memory on the GPU for either rollout kernel option. 
The combined rollout kernel \"\n        \"takes \" +\n        std::to_string(single_kernel_byte_size) + \" bytes, the cost rollout kernel takes \" +\n        std::to_string(split_cost_kernel_byte_size) + \" bytes, the dynamics rollout kernel takes \" +\n        std::to_string(split_dyn_kernel_byte_size) + \" bytes, the combined visualization kernel takes \" +\n        std::to_string(vis_single_kernel_byte_size) + \" bytes, and the max is \" +\n        std::to_string(deviceProp.sharedMemPerBlock) +\n        \" bytes. Considering lowering the corresponding thread block sizes.\";\n    throw std::runtime_error(error_msg);\n  }\n  else if (too_much_mem_single_kernel)\n  {\n    this->setKernelChoice(kernelType::USE_SPLIT_KERNELS);\n    return;\n  }\n  else if (too_much_mem_split_kernel)\n  {\n    this->setKernelChoice(kernelType::USE_SINGLE_KERNEL);\n    return;\n  }\n\n  // Send the nominal control to the device\n  this->copyNominalControlToDevice(false);\n  state_array zero_state = this->model_->getZeroState();\n  // Send zero state to the device\n  HANDLE_ERROR(cudaMemcpyAsync(this->initial_state_d_, zero_state.data(), DYN_T::STATE_DIM * sizeof(float),\n                               cudaMemcpyHostToDevice, this->stream_));\n  HANDLE_ERROR(cudaMemcpyAsync(this->initial_state_d_ + DYN_T::STATE_DIM, zero_state.data(),\n                               DYN_T::STATE_DIM * sizeof(float), cudaMemcpyHostToDevice, this->stream_));\n  // Generate noise data\n  this->sampler_->generateSamples(1, 0, this->gen_, true);\n\n  float single_kernel_time_ms = std::numeric_limits<float>::infinity();\n  float split_kernel_time_ms = std::numeric_limits<float>::infinity();\n\n  // Evaluate each kernel that is applicable\n  auto start_single_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < this->getNumKernelEvaluations() && !too_much_mem_single_kernel; i++)\n  {\n    mppi::kernels::launchRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n        this->model_, this->cost_, 
this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n        this->getLambda(), this->getAlpha(), this->initial_state_d_, this->trajectory_costs_d_,\n        this->params_.dynamics_rollout_dim_, this->stream_, true);\n  }\n  auto end_single_kernel_time = std::chrono::steady_clock::now();\n  auto start_split_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < this->getNumKernelEvaluations() && !too_much_mem_split_kernel; i++)\n  {\n    mppi::kernels::launchSplitRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n        this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n        this->getLambda(), this->getAlpha(), this->initial_state_d_, this->output_d_, this->trajectory_costs_d_,\n        this->params_.dynamics_rollout_dim_, this->params_.cost_rollout_dim_, this->stream_, true);\n  }\n  auto end_split_kernel_time = std::chrono::steady_clock::now();\n\n  // calc times\n  if (!too_much_mem_single_kernel)\n  {\n    single_kernel_time_ms = mppi::math::timeDiffms(end_single_kernel_time, start_single_kernel_time);\n  }\n  if (!too_much_mem_split_kernel)\n  {\n    split_kernel_time_ms = mppi::math::timeDiffms(end_split_kernel_time, start_split_kernel_time);\n  }\n  std::string kernel_choice = \"\";\n  if (split_kernel_time_ms < single_kernel_time_ms)\n  {\n    this->setKernelChoice(kernelType::USE_SPLIT_KERNELS);\n    kernel_choice = \"split \";\n  }\n  else\n  {\n    this->setKernelChoice(kernelType::USE_SINGLE_KERNEL);\n    kernel_choice = \"single\";\n  }\n  this->logger_->info(\"Choosing %s kernel based on split taking %f ms and single taking %f ms after %d iterations\\n\",\n                     kernel_choice.c_str(), split_kernel_time_ms, single_kernel_time_ms,\n                     this->getNumKernelEvaluations());\n}\n\nTUBE_MPPI_TEMPLATE\nvoid TubeMPPI::computeControl(const Eigen::Ref<const state_array>& state, int optimization_stride)\n{\n  if (!nominalStateInit_)\n  {\n    // set the 
nominal state to the actual state\n    nominal_state_trajectory_.col(0) = state;\n    nominalStateInit_ = true;\n  }\n\n  this->free_energy_statistics_.real_sys.previousBaseline = this->getBaselineCost(0);\n  this->free_energy_statistics_.nominal_sys.previousBaseline = this->getBaselineCost(1);\n\n  //  std::cout << \"Post disturbance Actual State: \"; this->model_->printState(state.data());\n  //  std::cout << \"                Nominal State: \"; this->model_->printState(nominal_state_trajectory_.col(0).data());\n\n  // Handy reference pointers to the nominal state\n  float* trajectory_costs_nominal_d = this->trajectory_costs_d_ + NUM_ROLLOUTS;\n  float* initial_state_nominal_d = this->initial_state_d_ + DYN_T::STATE_DIM;\n\n  for (int opt_iter = 0; opt_iter < this->getNumIters(); opt_iter++)\n  {\n    // Send the initial condition to the device\n    HANDLE_ERROR(cudaMemcpyAsync(this->initial_state_d_, state.data(), DYN_T::STATE_DIM * sizeof(float),\n                                 cudaMemcpyHostToDevice, this->stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(initial_state_nominal_d, nominal_state_trajectory_.data(),\n                                 DYN_T::STATE_DIM * sizeof(float), cudaMemcpyHostToDevice, this->stream_));\n\n    // Send the nominal control to the device\n    copyControlToDevice(false);\n\n    // Generate noise data\n    this->sampler_->generateSamples(optimization_stride, opt_iter, this->gen_, false);\n\n    // call rollout kernel with z = 2 since we have a nominal state\n    this->params_.dynamics_rollout_dim_.z = max(2, this->params_.dynamics_rollout_dim_.z);\n    this->params_.cost_rollout_dim_.z = max(2, this->params_.cost_rollout_dim_.z);\n\n    // Launch the rollout kernel\n    if (this->getKernelChoiceAsEnum() == kernelType::USE_SPLIT_KERNELS)\n    {\n      mppi::kernels::launchSplitRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n          this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n          
this->getLambda(), this->getAlpha(), this->initial_state_d_, this->output_d_, this->trajectory_costs_d_,\n          this->params_.dynamics_rollout_dim_, this->params_.cost_rollout_dim_, this->stream_, false);\n    }\n    else\n    {\n      mppi::kernels::launchRolloutKernel<DYN_T, COST_T, SAMPLING_T>(\n          this->model_, this->cost_, this->sampler_, this->getDt(), this->getNumTimesteps(), NUM_ROLLOUTS,\n          this->getLambda(), this->getAlpha(), this->initial_state_d_, this->trajectory_costs_d_,\n          this->params_.dynamics_rollout_dim_, this->stream_, false);\n    }\n\n    // Copy the costs back to the host\n    HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), this->trajectory_costs_d_,\n                                 NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n\n    HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_nominal_.data(), trajectory_costs_nominal_d,\n                                 NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n    this->setBaseline(mppi::kernels::computeBaselineCost(this->trajectory_costs_.data(), NUM_ROLLOUTS), 0);\n    this->setBaseline(mppi::kernels::computeBaselineCost(this->trajectory_costs_nominal_.data(), NUM_ROLLOUTS), 1);\n\n    // Launch the norm exponential kernel for both actual and nominal\n    mppi::kernels::launchNormExpKernel(NUM_ROLLOUTS, this->getNormExpThreads(), this->trajectory_costs_d_,\n                                       1.0 / this->getLambda(), this->getBaselineCost(0), this->stream_, false);\n\n    mppi::kernels::launchNormExpKernel(NUM_ROLLOUTS, this->getNormExpThreads(), trajectory_costs_nominal_d,\n                                       1.0 / this->getLambda(), this->getBaselineCost(1), this->stream_, false);\n\n    HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), this->trajectory_costs_d_,\n                                 NUM_ROLLOUTS * sizeof(float), 
cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_nominal_.data(), trajectory_costs_nominal_d,\n                                 NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n    // Compute the normalizer\n    this->setNormalizer(mppi::kernels::computeNormalizer(this->trajectory_costs_.data(), NUM_ROLLOUTS), 0);\n    this->setNormalizer(mppi::kernels::computeNormalizer(this->trajectory_costs_nominal_.data(), NUM_ROLLOUTS), 1);\n\n    // Compute real free energy\n    mppi::kernels::computeFreeEnergy(this->free_energy_statistics_.real_sys.freeEnergyMean,\n                                     this->free_energy_statistics_.real_sys.freeEnergyVariance,\n                                     this->free_energy_statistics_.real_sys.freeEnergyModifiedVariance,\n                                     this->trajectory_costs_.data(), NUM_ROLLOUTS, this->getBaselineCost(0),\n                                     this->getLambda());\n\n    // Compute Nominal State free Energy\n    mppi::kernels::computeFreeEnergy(this->free_energy_statistics_.nominal_sys.freeEnergyMean,\n                                     this->free_energy_statistics_.nominal_sys.freeEnergyVariance,\n                                     this->free_energy_statistics_.nominal_sys.freeEnergyModifiedVariance,\n                                     this->trajectory_costs_nominal_.data(), NUM_ROLLOUTS, this->getBaselineCost(1),\n                                     this->getLambda());\n\n    // Compute the cost weighted average\n    this->sampler_->updateDistributionParamsFromDevice(this->trajectory_costs_d_, this->getNormalizerCost(0), 0, false);\n    this->sampler_->updateDistributionParamsFromDevice(trajectory_costs_nominal_d, this->getNormalizerCost(1), 1,\n                                                       false);\n\n    // Transfer the new control to the host\n    
this->sampler_->setHostOptimalControlSequence(this->control_.data(), 0, false);\n    this->sampler_->setHostOptimalControlSequence(this->nominal_control_trajectory_.data(), 1, true);\n\n    // Compute the nominal and actual state trajectories\n    computeStateTrajectory(state);  // Input is the actual state\n\n    // this->logger_->debug(\"Actual baseline: %f\\n\", this->getBaselineCost(0));\n    // this->logger_->debug(\"Nominal baseline: %f\\n\", this->getBaselineCost(1));\n\n    if (this->getBaselineCost(0) < this->getBaselineCost(1) + getNominalThreshold())\n    {\n      // In this case, the disturbance that made the nominal and actual states differ improved the cost.\n      // std::copy(state_trajectory.begin(), state_trajectory.end(), nominal_state_trajectory_.begin());\n      // std::copy(control_trajectory.begin(), control_trajectory.end(), nominal_control_.begin());\n      this->free_energy_statistics_.nominal_state_used = 0;\n      nominal_state_trajectory_ = this->state_;\n      nominal_control_trajectory_ = this->control_;\n    }\n    else\n    {\n      this->free_energy_statistics_.nominal_state_used = 1;\n    }\n\n    // Outside of this loop, we will utilize the nominal state trajectory and the nominal control trajectory to compute\n    // the optimal feedback gains using our ancillary controller, then apply feedback inside our main while loop at the\n    // same rate as our state estimator.\n  }\n  smoothControlTrajectory();\n  computeStateTrajectory(state);  // Input is the actual state\n\n  this->free_energy_statistics_.real_sys.normalizerPercent = this->getNormalizerCost(0) / NUM_ROLLOUTS;\n  this->free_energy_statistics_.real_sys.increase =\n      this->getBaselineCost(0) - this->free_energy_statistics_.real_sys.previousBaseline;\n  this->free_energy_statistics_.nominal_sys.normalizerPercent = this->getNormalizerCost(1) / NUM_ROLLOUTS;\n  this->free_energy_statistics_.nominal_sys.increase =\n      this->getBaselineCost(1) - 
this->free_energy_statistics_.nominal_sys.previousBaseline;\n\n  // Copy back sampled trajectories\n  this->copySampledControlFromDevice(false);\n  this->copyTopControlFromDevice(true);\n}\n\nTUBE_MPPI_TEMPLATE\nvoid TubeMPPI::copyControlToDevice(bool synchronize)\n{\n  this->sampler_->copyImportanceSamplerToDevice(this->control_.data(), 0, false);\n  this->sampler_->copyImportanceSamplerToDevice(this->nominal_control_trajectory_.data(), 1, synchronize);\n}\n\nTUBE_MPPI_TEMPLATE\nvoid TubeMPPI::allocateCUDAMemory()\n{\n  PARENT_CLASS::allocateCUDAMemoryHelper(1);\n}\n\nTUBE_MPPI_TEMPLATE\nvoid TubeMPPI::slideControlSequence(int steps)\n{\n  // Propagate the nominal trajectory forward\n  updateNominalState(nominal_control_trajectory_.col(0));\n\n  // Save the control history\n  this->saveControlHistoryHelper(steps, nominal_control_trajectory_, this->control_history_);\n\n  this->slideControlSequenceHelper(steps, nominal_control_trajectory_);\n  this->slideControlSequenceHelper(steps, this->control_);\n}\n\nTUBE_MPPI_TEMPLATE\nvoid TubeMPPI::smoothControlTrajectory()\n{\n  this->smoothControlTrajectoryHelper(nominal_control_trajectory_, this->control_history_);\n}\n\nTUBE_MPPI_TEMPLATE\nvoid TubeMPPI::computeStateTrajectory(const Eigen::Ref<const state_array>& x0_actual)\n{\n  // update the nominal state\n  this->computeStateTrajectoryHelper(nominal_state_trajectory_, nominal_state_trajectory_.col(0),\n                                     nominal_control_trajectory_);\n  // update the actual state\n  this->computeStateTrajectoryHelper(this->state_, x0_actual, this->control_);\n}\n\nTUBE_MPPI_TEMPLATE\nvoid TubeMPPI::updateNominalState(const Eigen::Ref<const control_array>& u)\n{\n  state_array xdot;\n  output_array output;\n  this->model_->step(nominal_state_trajectory_.col(0), nominal_state_trajectory_.col(0), xdot, u, output, 0,\n                     this->getDt());\n}\n\nTUBE_MPPI_TEMPLATE\nvoid TubeMPPI::calculateSampledStateTrajectories()\n{\n  int 
num_sampled_trajectories = this->getTotalSampledTrajectories();\n  // control already copied in compute control, so run kernel\n  mppi::kernels::launchVisualizeCostKernel<COST_T, SAMPLING_T>(\n      this->cost_->cost_d_, this->sampler_->sampling_d_, this->getDt(), this->getNumTimesteps(),\n      num_sampled_trajectories, this->getLambda(), this->getAlpha(), this->sampled_outputs_d_,\n      this->sampled_crash_status_d_, this->sampled_costs_d_, this->params_.cost_rollout_dim_, this->stream_, false);\n\n  // copy back results\n  for (int i = 0; i < num_sampled_trajectories * 2; i++)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_trajectories_[i].data(),\n                                 this->sampled_outputs_d_ + i * this->getNumTimesteps() * DYN_T::OUTPUT_DIM,\n                                 this->getNumTimesteps() * DYN_T::OUTPUT_DIM * sizeof(float), cudaMemcpyDeviceToHost,\n                                 this->vis_stream_));\n  }\n  HANDLE_ERROR(cudaMemcpyAsync(this->sampled_costs_.data(), this->sampled_costs_d_,\n                               this->getNumTimesteps() * 2 * sizeof(float), cudaMemcpyDeviceToHost, this->vis_stream_));\n  HANDLE_ERROR(cudaMemcpyAsync(this->sampled_crash_status_.data(), this->sampled_crash_status_d_,\n                               this->getNumTimesteps() * 2 * sizeof(float), cudaMemcpyDeviceToHost, this->vis_stream_));\n  HANDLE_ERROR(cudaStreamSynchronize(this->vis_stream_));\n}\n"
  },
  {
    "path": "include/mppi/controllers/Tube-MPPI/tube_mppi_controller.cuh",
    "content": "/**\n * Created by Manan Gandhi on 2/14/2020\n * API for interfacing with the TUBE MPPI controller.\n */\n\n#ifndef MPPIGENERIC_TUBE_MPPI_CONTROLLER_CUH\n#define MPPIGENERIC_TUBE_MPPI_CONTROLLER_CUH\n\n// #include <curand.h>\n#include <mppi/controllers/controller.cuh>\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n#include <mppi/core/mppi_common.cuh>\n#include <chrono>\n#include <memory>\n#include <iostream>\n\ntemplate <int S_DIM, int C_DIM, int MAX_TIMESTEPS>\nstruct TubeMPPIParams : public ControllerParams<S_DIM, C_DIM, MAX_TIMESTEPS>\n{\n  float nominal_threshold_ = 20;  // How much worse the actual system has to be compared to the nominal\n};\n\ntemplate <class DYN_T, class COST_T, class FB_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS,\n          class SAMPLING_T = ::mppi::sampling_distributions::GaussianDistribution<typename DYN_T::DYN_PARAMS_T>,\n          class PARAMS_T = TubeMPPIParams<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM, MAX_TIMESTEPS>>\nclass TubeMPPIController : public Controller<DYN_T, COST_T, FB_T, SAMPLING_T, MAX_TIMESTEPS, NUM_ROLLOUTS, PARAMS_T>\n{\npublic:\n  //    EIGEN_MAKE_ALIGNED_OPERATOR_NEW unnecessary due to EIGEN_MAX_ALIGN_BYTES=0\n  /**\n   * Set up useful types\n   */\n  typedef Controller<DYN_T, COST_T, FB_T, SAMPLING_T, MAX_TIMESTEPS, NUM_ROLLOUTS, PARAMS_T> PARENT_CLASS;\n  using control_array = typename PARENT_CLASS::control_array;\n  using control_trajectory = typename PARENT_CLASS::control_trajectory;\n  using state_trajectory = typename PARENT_CLASS::state_trajectory;\n  using state_array = typename PARENT_CLASS::state_array;\n  using output_array = typename PARENT_CLASS::output_array;\n  using sampled_cost_traj = typename PARENT_CLASS::sampled_cost_traj;\n  using FEEDBACK_PARAMS = typename PARENT_CLASS::TEMPLATED_FEEDBACK_PARAMS;\n  using FEEDBACK_GPU = typename PARENT_CLASS::TEMPLATED_FEEDBACK_GPU;\n\n  TubeMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, float dt, 
int max_iter,\n                     float lambda, float alpha, int num_timesteps = MAX_TIMESTEPS,\n                     const Eigen::Ref<const control_trajectory>& init_control_traj = control_trajectory::Zero(),\n                     cudaStream_t stream = nullptr);\n\n  TubeMPPIController(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, PARAMS_T& params,\n                     cudaStream_t stream = nullptr);\n\n  void computeControl(const Eigen::Ref<const state_array>& state, int optimization_stride = 1) override;\n\n  std::string getControllerName()\n  {\n    return \"Tube MPPI\";\n  };\n\n  /**\n   * returns the current nominal control sequence\n   */\n  control_trajectory getControlSeq() const override\n  {\n    return nominal_control_trajectory_;\n  };\n\n  /**\n   * returns the current nominal state sequence\n   */\n  state_trajectory getTargetStateSeq() const override\n  {\n    return nominal_state_trajectory_;\n  };\n\n  /**\n   * returns the current control sequence\n   */\n  control_trajectory getActualControlSeq()\n  {\n    return this->control_;\n  };\n\n  /**\n   * Slide the control sequence back n steps\n   */\n  void slideControlSequence(int steps) override;\n\n  void smoothControlTrajectory();\n\n  void updateNominalState(const Eigen::Ref<const control_array>& u);\n\n  float getNominalThreshold() const\n  {\n    return this->params_.nominal_threshold_;\n  }\n  void setNominalThreshold(float threshold)\n  {\n    this->params_.nominal_threshold_ = threshold;\n  }\n\n  void setPercentageSampledControlTrajectories(float new_perc)\n  {\n    this->setPercentageSampledControlTrajectoriesHelper(new_perc, 2);\n  }\n\n  void calculateSampledStateTrajectories() override;\n\n  void chooseAppropriateKernel();\n\nprivate:\n  // float nominal_threshold_ = 20;  // How much worse the actual system has to be compared to the nominal\n\n  // Free energy variables\n  float nominal_free_energy_mean_ = 0;\n  float nominal_free_energy_variance_ = 0;\n  
float nominal_free_energy_modified_variance_ = 0;\n\n  // nominal state CPU side copies\n  control_trajectory nominal_control_trajectory_ = control_trajectory::Zero();\n  state_trajectory nominal_state_trajectory_ = state_trajectory::Zero();\n  sampled_cost_traj trajectory_costs_nominal_ = sampled_cost_traj::Zero();\n\n  // Check to see if nominal state has been initialized\n  bool nominalStateInit_ = false;\n\n  void computeStateTrajectory(const Eigen::Ref<const state_array>& x0_actual);\n\nprotected:\n  // TODO move up and generalize, pass in what to copy and initial location\n  void copyControlToDevice(bool synchronize = true);\n\nprivate:\n  // ======== PURE VIRTUAL =========\n  void allocateCUDAMemory();\n  // ======== PURE VIRTUAL END =====\n};\n\n#if __CUDACC__\n#include \"tube_mppi_controller.cu\"\n#endif\n\n#endif\n"
  },
  {
    "path": "include/mppi/controllers/controller.cu",
    "content": "#include <mppi/controllers/controller.cuh>\n\n#define CONTROLLER_TEMPLATE                                                                                            \\\n  template <class DYN_T, class COST_T, class FB_T, class SAMPLING_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS,              \\\n            class PARAMS_T>\n#define CONTROLLER Controller<DYN_T, COST_T, FB_T, SAMPLING_T, MAX_TIMESTEPS, NUM_ROLLOUTS, PARAMS_T>\n\nCONTROLLER_TEMPLATE\nvoid CONTROLLER::deallocateCUDAMemory()\n{\n  if (CUDA_mem_init_)\n  {\n    HANDLE_ERROR(cudaFree(initial_state_d_));\n    HANDLE_ERROR(cudaFree(vis_initial_state_d_));\n    HANDLE_ERROR(cudaFree(control_d_));\n    HANDLE_ERROR(cudaFree(output_d_));\n    HANDLE_ERROR(cudaFree(trajectory_costs_d_));\n    HANDLE_ERROR(cudaFree(cost_baseline_and_norm_d_));\n    CUDA_mem_init_ = false;\n  }\n  if (sampled_states_CUDA_mem_init_)\n  {\n    HANDLE_ERROR(cudaFree(sampled_outputs_d_));\n    HANDLE_ERROR(cudaFree(sampled_costs_d_));\n    sampled_states_CUDA_mem_init_ = false;\n  }\n}\n\n// CONTROLLER_TEMPLATE\n// void CONTROLLER::copyControlStdDevToDevice(bool synchronize)\n// {\n//   if (!CUDA_mem_init_)\n//   {\n//     return;\n//   }\n//   HANDLE_ERROR(cudaMemcpyAsync(control_std_dev_d_, params_.control_std_dev_.data(),\n//                                sizeof(float) * params_.control_std_dev_.size(), cudaMemcpyHostToDevice, stream_));\n//   if (synchronize)\n//   {\n//     HANDLE_ERROR(cudaStreamSynchronize(stream_));\n//   }\n// }\n\nCONTROLLER_TEMPLATE\nvoid CONTROLLER::copyNominalControlToDevice(bool synchronize)\n{\n  if (!CUDA_mem_init_)\n  {\n    return;\n  }\n  this->sampler_->copyImportanceSamplerToDevice(control_.data(), 0, synchronize);\n}\n\nCONTROLLER_TEMPLATE\nvoid CONTROLLER::copySampledControlFromDevice(bool synchronize)\n{\n  // if mem is not inited don't use it\n  if (!sampled_states_CUDA_mem_init_)\n  {\n    return;\n  }\n\n  int num_sampled_trajectories = perc_sampled_control_trajectories_ * 
NUM_ROLLOUTS;\n  std::vector<int> samples(num_sampled_trajectories);\n  if (perc_sampled_control_trajectories_ > 0.98)\n  {\n    // if above threshold just do everything\n    std::iota(samples.begin(), samples.end(), 0);\n  }\n  else\n  {\n    // Create sample list without replacement\n    // removes the top 2% since top 1% are complete noise\n    samples = mppi::math::sample_without_replacement(num_sampled_trajectories, NUM_ROLLOUTS * 0.98);\n  }\n\n  // this explicitly adds the optimized control sequence\n  HANDLE_ERROR(cudaMemcpyAsync(this->sampled_outputs_d_, this->output_.data(),\n                               sizeof(float) * getNumTimesteps() * DYN_T::OUTPUT_DIM, cudaMemcpyHostToDevice,\n                               this->vis_stream_));\n  HANDLE_ERROR(cudaMemcpyAsync(\n      //  this->sampled_noise_d_,\n      this->sampler_->getVisControlSample(0, 0, 0), this->control_d_,\n      sizeof(float) * getNumTimesteps() * DYN_T::CONTROL_DIM, cudaMemcpyDeviceToDevice, this->vis_stream_));\n\n  for (int i = 1; i < num_sampled_trajectories; i++)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_outputs_d_ + i * getNumTimesteps() * DYN_T::OUTPUT_DIM,\n                                 this->output_d_ + samples[i] * getNumTimesteps() * DYN_T::OUTPUT_DIM,\n                                 sizeof(float) * getNumTimesteps() * DYN_T::OUTPUT_DIM, cudaMemcpyDeviceToDevice,\n                                 this->vis_stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(\n        //  this->sampled_noise_d_ + i * getNumTimesteps() * DYN_T::CONTROL_DIM,\n        this->sampler_->getVisControlSample(i, 0, 0), this->sampler_->getControlSample(samples[i], 0, 0),\n        //  this->control_noise_d_ + samples[i] * getNumTimesteps() * DYN_T::CONTROL_DIM,\n        sizeof(float) * getNumTimesteps() * DYN_T::CONTROL_DIM, cudaMemcpyDeviceToDevice, this->vis_stream_));\n  }\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->vis_stream_));\n  
}\n}\n\nCONTROLLER_TEMPLATE\nstd::pair<int, float> CONTROLLER::findMinIndexAndValue(std::vector<int>& temp_list)\n{\n  if (temp_list.size() == 0)\n  {\n    return std::make_pair(0, 0.0);\n  }\n  int min_sample_index = 0;\n  float min_sample_value = this->trajectory_costs_[temp_list[min_sample_index]];\n\n  for (int index = 1; index < temp_list.size(); index++)\n  {\n    if (this->trajectory_costs_[temp_list[index]] < min_sample_value)\n    {\n      min_sample_value = this->trajectory_costs_[temp_list[index]];\n      min_sample_index = index;\n    }\n  }\n  return std::make_pair(min_sample_index, min_sample_value);\n}\n\nCONTROLLER_TEMPLATE\nvoid CONTROLLER::copyTopControlFromDevice(bool synchronize)\n{\n  // if mem is not inited don't use it\n  if (!sampled_states_CUDA_mem_init_ || num_top_control_trajectories_ <= 0)\n  {\n    return;\n  }\n\n  // Important note: Highest weighted trajectories are the ones with the lowest cost\n  int start_top_control_traj_index = perc_sampled_control_trajectories_ * NUM_ROLLOUTS;\n  std::vector<int> samples(num_top_control_trajectories_);\n  // Start by filling in the top samples list with the first n in the trajectory\n  for (int i = 0; i < num_top_control_trajectories_; i++)\n  {\n    samples[i] = i;\n  }\n\n  // Calculate min weight in the current top samples list\n  int min_sample_index = 0;\n  float min_sample_value = 0;\n  std::tie(min_sample_index, min_sample_value) = findMinIndexAndValue(samples);\n\n  // find top n samples by removing the smallest weights from the list\n  for (int i = num_top_control_trajectories_; i < NUM_ROLLOUTS; i++)\n  {\n    if (trajectory_costs_[i] > min_sample_value)\n    {  // Remove the smallest weight in the current list and add the new index\n      samples[min_sample_index] = i;\n      // recalculate min weight in the current list\n      std::tie(min_sample_index, min_sample_value) = findMinIndexAndValue(samples);\n    }\n  }\n\n  // Copy top n samples to this->sampled_noise_d_ after the 
randomly sampled trajectories\n  top_n_costs_.resize(num_top_control_trajectories_);\n  for (int i = 0; i < num_top_control_trajectories_; i++)\n  {\n    top_n_costs_[i] = trajectory_costs_[samples[i]] / getNormalizerCost();\n    HANDLE_ERROR(cudaMemcpyAsync(\n        this->sampled_outputs_d_ + (start_top_control_traj_index + i) * getNumTimesteps() * DYN_T::OUTPUT_DIM,\n        this->output_d_ + samples[i] * getNumTimesteps() * DYN_T::OUTPUT_DIM,\n        sizeof(float) * getNumTimesteps() * DYN_T::OUTPUT_DIM, cudaMemcpyDeviceToDevice, this->vis_stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(\n        // this->sampled_noise_d_ + (start_top_control_traj_index + i) * getNumTimesteps() * DYN_T::CONTROL_DIM,\n        this->sampler_->getVisControlSample(start_top_control_traj_index + i, 0, 0),\n        this->sampler_->getControlSample(samples[i], 0, 0),\n        // this->control_noise_d_ + samples[i] * getNumTimesteps() * DYN_T::CONTROL_DIM,\n        sizeof(float) * getNumTimesteps() * DYN_T::CONTROL_DIM, cudaMemcpyDeviceToDevice, this->vis_stream_));\n  }\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->vis_stream_));\n  }\n}\n\nCONTROLLER_TEMPLATE\nvoid CONTROLLER::setCUDAStream(cudaStream_t stream)\n{\n  stream_ = stream;\n  model_->bindToStream(stream);\n  cost_->bindToStream(stream);\n  fb_controller_->bindToStream(stream);\n  sampler_->bindToStream(stream);\n  curandSetStream(gen_, stream);  // requires the generator to be created!\n}\n\nCONTROLLER_TEMPLATE\nvoid CONTROLLER::createAndSeedCUDARandomNumberGen()\n{\n  // Seed the PseudoRandomGenerator with the CPU time.\n  curandCreateGenerator(&gen_, CURAND_RNG_PSEUDO_DEFAULT);\n  setSeedCUDARandomNumberGen(this->params_.seed_);\n}\n\nCONTROLLER_TEMPLATE\nvoid CONTROLLER::setSeedCUDARandomNumberGen(unsigned seed)\n{\n  // Seed the PseudoRandomGenerator with the CPU time.\n  curandSetPseudoRandomGeneratorSeed(gen_, seed);\n  // Reset the offset so setting the seed multiple times returns the same 
samples\n  curandSetGeneratorOffset(gen_, 0);\n}\n\nCONTROLLER_TEMPLATE\nvoid CONTROLLER::allocateCUDAMemoryHelper(int nominal_size, bool allocate_double_noise)\n{\n  if (nominal_size < 0)\n  {\n    nominal_size = 1;\n    std::cerr << \"nominal size cannot be below 0 when allocateCudaMemoryHelper is called\" << std::endl;\n    std::exit(-1);\n  }\n  else\n  {\n    // increment by 1 since actual is not included\n    ++nominal_size;\n  }\n  HANDLE_ERROR(cudaMalloc((void**)&initial_state_d_, sizeof(float) * DYN_T::STATE_DIM * nominal_size));\n  HANDLE_ERROR(cudaMalloc((void**)&vis_initial_state_d_, sizeof(float) * DYN_T::STATE_DIM * nominal_size));\n  HANDLE_ERROR(cudaMalloc((void**)&control_d_, sizeof(float) * DYN_T::CONTROL_DIM * MAX_TIMESTEPS * nominal_size));\n  HANDLE_ERROR(\n      cudaMalloc((void**)&output_d_, sizeof(float) * DYN_T::OUTPUT_DIM * MAX_TIMESTEPS * NUM_ROLLOUTS * nominal_size));\n  HANDLE_ERROR(cudaMalloc((void**)&trajectory_costs_d_, sizeof(float) * NUM_ROLLOUTS * nominal_size));\n  // HANDLE_ERROR(cudaMalloc((void**)&control_std_dev_d_, sizeof(float) * DYN_T::CONTROL_DIM));\n  // HANDLE_ERROR(cudaMalloc((void**)&control_noise_d_, sizeof(float) * DYN_T::CONTROL_DIM * MAX_TIMESTEPS *\n  // NUM_ROLLOUTS *\n  //                                                        (allocate_double_noise ? 
nominal_size : 1)));\n  HANDLE_ERROR(cudaMalloc((void**)&cost_baseline_and_norm_d_, sizeof(float2) * nominal_size));\n  cost_baseline_and_norm_.resize(nominal_size, make_float2(0.0, 0.0));\n  CUDA_mem_init_ = true;\n}\n\nCONTROLLER_TEMPLATE\nvoid CONTROLLER::resizeSampledControlTrajectories(float perc, int multiplier, int top_num)\n{\n  int num_sampled_trajectories = perc * NUM_ROLLOUTS + top_num;\n\n  if (sampled_states_CUDA_mem_init_)\n  {\n    cudaFree(sampled_outputs_d_);\n    // cudaFree(sampled_noise_d_);\n    cudaFree(sampled_costs_d_);\n    cudaFree(sampled_crash_status_d_);\n    sampled_states_CUDA_mem_init_ = false;\n  }\n  sampled_trajectories_.resize(num_sampled_trajectories * multiplier, output_trajectory::Zero());\n  sampled_costs_.resize(num_sampled_trajectories * multiplier, cost_trajectory::Zero());\n  sampled_crash_status_.resize(num_sampled_trajectories * multiplier, crash_status_trajectory::Zero());\n  sampler_->setNumVisRollouts(num_sampled_trajectories);\n  if (num_sampled_trajectories <= 0)\n  {\n    return;\n  }\n\n  HANDLE_ERROR(cudaMalloc((void**)&sampled_outputs_d_,\n                          sizeof(float) * DYN_T::OUTPUT_DIM * MAX_TIMESTEPS * num_sampled_trajectories * multiplier));\n  // HANDLE_ERROR(cudaMalloc((void**)&sampled_noise_d_,\n  //                         sizeof(float) * DYN_T::CONTROL_DIM * MAX_TIMESTEPS * num_sampled_trajectories *\n  //                         multiplier));\n  // +1 for terminal cost\n  HANDLE_ERROR(cudaMalloc((void**)&sampled_costs_d_,\n                          sizeof(float) * (MAX_TIMESTEPS + 1) * num_sampled_trajectories * multiplier));\n  HANDLE_ERROR(cudaMalloc((void**)&sampled_crash_status_d_,\n                          sizeof(int) * MAX_TIMESTEPS * num_sampled_trajectories * multiplier));\n  sampled_states_CUDA_mem_init_ = true;\n}\n\nCONTROLLER_TEMPLATE\nstd::vector<float> CONTROLLER::getSampledNoise()\n{\n  std::vector<float> vector = std::vector<float>(NUM_ROLLOUTS * getNumTimesteps() * 
DYN_T::CONTROL_DIM, FLT_MIN);\n\n  HANDLE_ERROR(cudaMemcpyAsync(vector.data(), this->sampler_->getControlSample(0, 0, 0),\n                               sizeof(float) * NUM_ROLLOUTS * getNumTimesteps() * DYN_T::CONTROL_DIM,\n                               cudaMemcpyDeviceToHost, stream_));\n  HANDLE_ERROR(cudaStreamSynchronize(stream_));\n  return vector;\n}\n\n#undef CONTROLLER_TEMPLATE\n#undef CONTROLLER\n"
  },
  {
    "path": "include/mppi/controllers/controller.cuh",
    "content": "//\n// Created by jason on 10/30/19.\n//\n\n#ifndef MPPIGENERIC_CONTROLLER_CUH\n#define MPPIGENERIC_CONTROLLER_CUH\n\n#include <array>\n#include <Eigen/Core>\n#include <chrono>\n#include \"curand.h\"\n\n#include <mppi/core/mppi_common.cuh>\n#include <mppi/feedback_controllers/feedback.cuh>\n#include <mppi/utils/gpu_err_chk.cuh>\n#include <mppi/utils/logger.hpp>\n#include <mppi/utils/math_utils.h>\n\n#include <cfloat>\n#include <utility>\n\nstruct freeEnergyEstimate\n{\n  float increase = -1;\n  float previousBaseline = -1;\n  float freeEnergyMean = -1;\n  float freeEnergyVariance = -1;\n  float freeEnergyModifiedVariance = -1;\n  float normalizerPercent = -1;\n};\n\nstruct MPPIFreeEnergyStatistics\n{\n  int nominal_state_used = 0;\n\n  freeEnergyEstimate nominal_sys;\n  freeEnergyEstimate real_sys;\n};\n\nenum class kernelType : int\n{\n  USE_SINGLE_KERNEL = 0,  // combine dynamics and cost calls into a single kernel\n  USE_SPLIT_KERNELS,      // separate kernels for dynamics and cost calls\n};\n\ntemplate <int S_DIM, int C_DIM, int MAX_TIMESTEPS>\nstruct ControllerParams\n{\n  static const int TEMPLATED_STATE_DIM = S_DIM;\n  static const int TEMPLATED_CONTROL_DIM = C_DIM;\n  static const int TEMPLATED_MAX_TIMESTEPS = MAX_TIMESTEPS;\n  float dt_;\n  float lambda_ = 1.0;  // Value of the temperature in the softmax.\n  float alpha_ = 0.0;   //\n  // MAX_TIMESTEPS is defined as an upper bound, if lower that region is just ignored when calculating control\n  // does not reallocate cuda memory\n  int num_timesteps_ = MAX_TIMESTEPS;\n  int num_iters_ = 1;  // Number of optimization iterations\n  unsigned seed_ = std::chrono::system_clock::now().time_since_epoch().count();\n\n  dim3 dynamics_rollout_dim_;\n  dim3 cost_rollout_dim_;\n  dim3 visualize_dim_ = dim3(32, 1, 1);\n  int norm_exp_kernel_parallelization_ = 64;\n\n  Eigen::Matrix<float, C_DIM, MAX_TIMESTEPS> init_control_traj_ = Eigen::Matrix<float, C_DIM, MAX_TIMESTEPS>::Zero();\n  
Eigen::Matrix<float, C_DIM, 1> slide_control_scale_ = Eigen::Matrix<float, C_DIM, 1>::Zero();\n};\n\ntemplate <class DYN_T, class COST_T, class FB_T, class SAMPLING_T, int MAX_TIMESTEPS, int NUM_ROLLOUTS,\n          class PARAMS_T = ControllerParams<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM, MAX_TIMESTEPS>>\nclass Controller\n{\npublic:\n  // EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /**\n   * typedefs for access to templated class from outside classes\n   */\n  typedef DYN_T TEMPLATED_DYNAMICS;\n  typedef COST_T TEMPLATED_COSTS;\n  typedef FB_T TEMPLATED_FEEDBACK;\n  typedef PARAMS_T TEMPLATED_PARAMS;\n  typedef SAMPLING_T TEMPLATED_SAMPLING;\n  using TEMPLATED_FEEDBACK_STATE = typename FB_T::TEMPLATED_FEEDBACK_STATE;\n  using TEMPLATED_FEEDBACK_PARAMS = typename FB_T::TEMPLATED_PARAMS;\n  using TEMPLATED_FEEDBACK_GPU = typename FB_T::TEMPLATED_GPU_FEEDBACK;\n  using TEMPLATED_SAMPLING_PARAMS = typename SAMPLING_T::SAMPLING_PARAMS_T;\n  static const int TEMPLATED_FEEDBACK_TIMESTEPS = FB_T::FB_TIMESTEPS;\n\n  /**\n   * Aliases\n   */\n  // Control typedefs\n  using control_array = typename DYN_T::control_array;\n  typedef Eigen::Matrix<float, DYN_T::CONTROL_DIM, MAX_TIMESTEPS> control_trajectory;  // A control trajectory\n\n  // State typedefs\n  using state_array = typename DYN_T::state_array;\n  typedef Eigen::Matrix<float, DYN_T::STATE_DIM, MAX_TIMESTEPS> state_trajectory;  // A state trajectory\n\n  // Output typedefs\n  using output_array = typename DYN_T::output_array;\n  typedef Eigen::Matrix<float, DYN_T::OUTPUT_DIM, MAX_TIMESTEPS> output_trajectory;  // An output trajectory\n\n  // Cost typedefs\n  typedef Eigen::Matrix<float, MAX_TIMESTEPS + 1, 1> cost_trajectory;  // +1 for terminal cost\n  typedef Eigen::Matrix<float, NUM_ROLLOUTS, 1> sampled_cost_traj;\n  typedef Eigen::Matrix<int, MAX_TIMESTEPS, 1> crash_status_trajectory;\n\n  Controller(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, float dt, int max_iter, float lambda,\n             
float alpha, int num_timesteps = MAX_TIMESTEPS,\n             const Eigen::Ref<const control_trajectory>& init_control_traj = control_trajectory::Zero(),\n             cudaStream_t stream = nullptr)\n  {\n    // Create the random number generator\n    createAndSeedCUDARandomNumberGen();\n    model_ = model;\n    cost_ = cost;\n    fb_controller_ = fb_controller;\n    sampler_ = sampler;\n    sampler_->setNumRollouts(NUM_ROLLOUTS);\n    sampler_->setNumDistributions(1);\n    TEMPLATED_PARAMS params;\n    params.dt_ = dt;\n    params.num_iters_ = max_iter;\n    params.lambda_ = lambda;\n    params.alpha_ = alpha;\n    params.num_timesteps_ = num_timesteps;\n    params.init_control_traj_ = init_control_traj;\n    setNumTimesteps(params.num_timesteps_);\n    setParams(params);\n\n    control_ = init_control_traj;\n    control_history_ = Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2>::Zero();\n\n    // Bind the model and control to the given stream\n    setCUDAStream(stream);\n    // Create new stream for visualization purposes\n    HANDLE_ERROR(cudaStreamCreate(&vis_stream_));\n\n    GPUSetup();\n\n    auto logger = std::make_shared<mppi::util::MPPILogger>();\n    setLogger(logger);\n\n    /**\n     * When implementing your own version make sure to write your own allocateCUDAMemory and call it from the\n     * constructor along with any other methods to copy memory to the device and back\n     */\n    // TODO pass function pointer?\n  }\n\n  Controller(DYN_T* model, COST_T* cost, FB_T* fb_controller, SAMPLING_T* sampler, PARAMS_T& params,\n             cudaStream_t stream = nullptr)\n  {\n    model_ = model;\n    cost_ = cost;\n    fb_controller_ = fb_controller;\n    sampler_ = sampler;\n    sampler_->setNumRollouts(NUM_ROLLOUTS);\n    sampler_->setNumDistributions(1);\n    setNumTimesteps(params_.num_timesteps_);\n    // Create the random number generator\n    createAndSeedCUDARandomNumberGen();\n    setParams(params);\n    control_ = params_.init_control_traj_;\n    
control_history_ = Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2>::Zero();\n\n    // Bind the model and control to the given stream\n    setCUDAStream(stream);\n    // Create new stream for visualization purposes\n    HANDLE_ERROR(cudaStreamCreate(&vis_stream_));\n\n    GPUSetup();\n\n    auto logger = std::make_shared<mppi::util::MPPILogger>();\n    setLogger(logger);\n\n    /**\n     * When implementing your own version make sure to write your own allocateCUDAMemory and call it from the\n     * constructor along with any other methods to copy memory to the device and back\n     */\n    // TODO pass function pointer?\n  }\n\n  // TODO should be private with test as a friend to ensure it is only used in testing\n  Controller() = default;\n\n  /**\n   * Destructor must be virtual so that children are properly\n   * destroyed when called from a basePlant reference\n   */\n  virtual ~Controller()\n  {\n    // Free the CUDA memory of every object\n    if (model_)\n    {\n      model_->freeCudaMem();\n    }\n    if (cost_)\n    {\n      cost_->freeCudaMem();\n    }\n    if (fb_controller_)\n    {\n      fb_controller_->freeCudaMem();\n    }\n    if (sampler_)\n    {\n      sampler_->freeCudaMem();\n    }\n\n    // Free the CUDA memory of the controller\n    deallocateCUDAMemory();\n  }\n\n  // =================== METHODS THAT SHOULD HAVE NO DEFAULT ==========================\n  // ======== PURE VIRTUAL =========\n  /**\n   * Given a state, calculates the optimal control sequence using MPPI according\n   * to the cost function used as part of the template\n   * @param state - the current state from which we would like to calculate\n   * a control sequence\n   */\n  virtual void computeControl(const Eigen::Ref<const state_array>& state, int optimization_stride) = 0;\n\n  /**\n   * Call a kernel to evaluate the sampled state trajectories for visualization\n   * and debugging.\n   */\n  virtual void calculateSampledStateTrajectories() = 0;\n\n  // ================ END OF 
METHODS WITH NO DEFAULT =============\n  // ======== PURE VIRTUAL END =====\n\n  virtual std::string getControllerName() const\n  {\n    return \"name not set\";\n  };\n\n  virtual std::string getCostFunctionName() const\n  {\n    return cost_->getCostFunctionName();\n  }\n\n  virtual std::string getDynamicsModelName() const\n  {\n    return model_->getDynamicsModelName();\n  }\n\n  virtual std::string getSamplingDistributionName() const\n  {\n    return sampler_->getSamplingDistributionName();\n  }\n\n  virtual std::string getFullName() const\n  {\n    return getControllerName() + \"(\" + getDynamicsModelName() + \", \" + getCostFunctionName() + \", \" +\n           getSamplingDistributionName() + \")\";\n  }\n\n  virtual void initFeedback()\n  {\n    enable_feedback_ = true;\n    fb_controller_->initTrackingController();\n  };\n\n  virtual void GPUSetup()\n  {\n    // Call the GPU setup functions of the model, cost, sampling distribution, and feedback controller\n    model_->GPUSetup();\n    cost_->GPUSetup();\n    fb_controller_->GPUSetup();\n    sampler_->setVisStream(vis_stream_);\n    sampler_->GPUSetup();\n  }\n\n  virtual std::vector<output_trajectory> getSampledOutputTrajectories() const\n  {\n    return sampled_trajectories_;\n  }\n\n  virtual std::vector<cost_trajectory> getSampledCostTrajectories() const\n  {\n    return sampled_costs_;\n  }\n\n  virtual std::vector<crash_status_trajectory> getSampledCrashStatusTrajectories() const\n  {\n    return sampled_crash_status_;\n  }\n\n  virtual std::vector<float> getTopTransformedCosts() const\n  {\n    return top_n_costs_;\n  }\n\n  virtual void chooseAppropriateKernel()\n  {\n    use_kernel_ = kernelType::USE_SPLIT_KERNELS;\n  }\n\n  /**\n   * only used in rmppi, here for generic calls in base_plant. 
Jank as hell\n   * @param state\n   * @param stride\n   */\n  void updateImportanceSamplingControl(const Eigen::Ref<const state_array>& state, int optimization_stride)\n  {\n  }\n\n  /**\n   * Used to update the importance sampler\n   * @param nominal_control the new nominal control sequence to sample around\n   */\n  virtual void updateImportanceSampler(const Eigen::Ref<const control_trajectory>& nominal_control)\n  {\n    // TODO copy to device new control sequence\n    control_ = nominal_control;\n  }\n\n  /**\n   * determines the control that should\n   * @param state\n   * @param rel_time\n   * @return\n   */\n  virtual control_array getCurrentControl(Eigen::Ref<state_array> state, double rel_time,\n                                          Eigen::Ref<state_array> target_nominal_state,\n                                          Eigen::Ref<control_trajectory> c_traj, TEMPLATED_FEEDBACK_STATE& fb_state)\n  {\n    // MPPI control\n    control_array u_ff = interpolateControls(rel_time, c_traj);\n    control_array u_fb = control_array::Zero();\n    if (enable_feedback_)\n    {\n      u_fb = interpolateFeedback(state, target_nominal_state, rel_time, fb_state);\n    }\n    control_array result = u_ff + u_fb;\n\n    state_array empty_state = model_->getZeroState();\n    model_->enforceConstraints(empty_state, result);\n\n    return result;\n  }\n\n  /**\n   * @param steps - number of dt's to slide control sequence forward\n   * Slide the control sequence forwards by 'steps'\n   */\n  virtual void slideControlSequence(int steps)\n  {\n    // Save the control history\n    this->saveControlHistoryHelper(steps, this->control_, this->control_history_);\n\n    this->slideControlSequenceHelper(steps, this->control_);\n  }\n\n  /**\n   * determines the interpolated control from control_seq_, linear interpolation\n   * @param rel_time time since the solution was calculated\n   * @return\n   */\n  virtual control_array interpolateControls(double rel_time, 
Eigen::Ref<control_trajectory> c_traj)\n  {\n    int lower_idx = (int)(rel_time / getDt());\n    int upper_idx = lower_idx + 1;\n    double alpha = (rel_time - lower_idx * getDt()) / getDt();\n\n    control_array interpolated_control;\n    control_array prev_cmd = c_traj.col(lower_idx);\n    control_array next_cmd = c_traj.col(upper_idx);\n    interpolated_control = (1 - alpha) * prev_cmd + alpha * next_cmd;\n    return interpolated_control;\n  }\n\n  virtual state_array interpolateState(Eigen::Ref<state_trajectory> s_traj, double rel_time)\n  {\n    int lower_idx = (int)(rel_time / getDt());\n    int upper_idx = lower_idx + 1;\n    double alpha = (rel_time - lower_idx * getDt()) / getDt();\n\n    return model_->interpolateState(s_traj.col(lower_idx), s_traj.col(upper_idx), alpha);\n  }\n\n  /**\n   *\n   * @param state\n   * @param rel_time\n   * @return\n   */\n  virtual control_array interpolateFeedback(Eigen::Ref<state_array> state, Eigen::Ref<state_array> target_nominal_state,\n                                            double rel_time, TEMPLATED_FEEDBACK_STATE& fb_state)\n  {\n    return fb_controller_->interpolateFeedback_(state, target_nominal_state, rel_time, fb_state);\n  }\n\n  virtual curandGenerator_t getGenerator() const\n  {\n    return gen_;\n  }\n\n  /**\n   * returns the current control sequence\n   */\n  virtual control_trajectory getControlSeq() const\n  {\n    return control_;\n  };\n\n  /**\n   * Gets the state sequence of the nominal trajectory\n   */\n  virtual state_trajectory getTargetStateSeq() const\n  {\n    return state_;\n  }\n\n  /**\n   * Gets the output sequence of the nominal trajectory\n   */\n  virtual output_trajectory getTargetOutputSeq() const\n  {\n    return output_;\n  }\n\n  /**\n   * Return all the sampled costs sequences\n   */\n  virtual sampled_cost_traj getSampledCostSeq() const\n  {\n    return trajectory_costs_;\n  };\n\n  /**\n   * Return control feedback gains\n   */\n  // TODO: Think of a better name for this 
method?\n  virtual TEMPLATED_FEEDBACK_STATE getFeedbackState() const\n  {\n    if (enable_feedback_)\n    {\n      return fb_controller_->getFeedbackState();\n    }\n    else\n    {\n      TEMPLATED_FEEDBACK_STATE default_state;\n      return default_state;\n    }\n  };\n\n  virtual TEMPLATED_FEEDBACK_PARAMS getFeedbackParams() const\n  {\n    if (enable_feedback_)\n    {\n      return fb_controller_->getParams();\n    }\n    else\n    {\n      TEMPLATED_FEEDBACK_PARAMS default_fb_params;\n      return default_fb_params;\n    }\n  }\n\n  // Indicator for algorithm health, should be between 0.01 and 0.1 anecdotally\n  float getNormalizerPercent() const\n  {\n    return this->getNormalizerCost() / (float)NUM_ROLLOUTS;\n  }\n\n  /**\n   * Computes the actual trajectory given the MPPI optimal control and the\n   * feedback gains computed by DDP. If feedback is not enabled, then we return\n   * zero since this function would not make sense.\n   */\n  virtual void computeFeedbackPropagatedStateSeq()\n  {\n    if (!enable_feedback_)\n    {\n      return;\n    }\n    // Compute the nominal trajectory\n    propagated_feedback_state_trajectory_.col(0) = getActualStateSeq().col(0);  // State that we optimized from\n    state_array xdot;\n    state_array current_state, next_state;\n    output_array output;\n    control_array current_control;\n    for (int i = 0; i < getNumTimesteps() - 1; ++i)\n    {\n      current_state = propagated_feedback_state_trajectory_.col(i);\n      // MPPI control apply feedback at the given timestep against the nominal trajectory at that timestep\n      current_control = getControlSeq().col(i) + getFeedbackControl(current_state, getTargetStateSeq().col(i), i);\n      model_->step(current_state, next_state, xdot, current_control, output, i, getDt());\n      propagated_feedback_state_trajectory_.col(i + 1) = next_state;\n    }\n  }\n\n  /**\n   *\n   * @return State trajectory from optimized state with MPPI control and computed feedback gains\n   */\n 
 state_trajectory getFeedbackPropagatedStateSeq() const\n  {\n    return propagated_feedback_state_trajectory_;\n  };\n\n  float getBaselineCost(int ind = 0) const\n  {\n    return cost_baseline_and_norm_[ind].x;\n  };\n  float getNormalizerCost(int ind = 0) const\n  {\n    return cost_baseline_and_norm_[ind].y;\n  };\n\n  /**\n   * returns the current state sequence\n   */\n  state_trajectory getActualStateSeq() const\n  {\n    return state_;\n  };\n\n  /**\n   * returns the current output sequence\n   */\n  output_trajectory getActualOutputSeq() const\n  {\n    return output_;\n  };\n\n  virtual void computeFeedbackHelper(const Eigen::Ref<const state_array>& state,\n                                     const Eigen::Ref<const state_trajectory>& state_traj,\n                                     const Eigen::Ref<const control_trajectory>& control_traj)\n  {\n    if (!enable_feedback_)\n    {\n      return;\n    }\n    fb_controller_->computeFeedback(state, state_traj, control_traj);\n  }\n\n  virtual void computeFeedback(const Eigen::Ref<const state_array>& state)\n  {\n    computeFeedbackHelper(state, getTargetStateSeq(), getControlSeq());\n  }\n\n  virtual control_array getFeedbackControl(const Eigen::Ref<const state_array>& state,\n                                           const Eigen::Ref<const state_array>& goal_state, int t)\n  {\n    return fb_controller_->k(state, goal_state, t);\n  }\n\n  void smoothControlTrajectoryHelper(Eigen::Ref<control_trajectory> u,\n                                     const Eigen::Ref<Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2>>& control_history)\n  {\n    // TODO generalize to any size filter\n    // TODO does the logic of handling control history reasonable?\n\n    // Create the filter coefficients\n    Eigen::Matrix<float, 1, 5> filter_coefficients;\n    filter_coefficients << -3, 12, 17, 12, -3;\n    filter_coefficients /= 35.0;\n\n    // Create and fill a control buffer that we can apply the convolution filter\n    
Eigen::MatrixXf control_buffer(getNumTimesteps() + 4, DYN_T::CONTROL_DIM);\n\n    // Fill the first two timesteps with the control history\n    control_buffer.topRows(2) = control_history.transpose();\n\n    // Fill the center timesteps with the current nominal trajectory\n    control_buffer.middleRows(2, getNumTimesteps()) = u.transpose();\n\n    // Fill the last two timesteps with the end of the current nominal control trajectory\n    control_buffer.row(getNumTimesteps() + 2) = u.transpose().row(getNumTimesteps() - 1);\n    control_buffer.row(getNumTimesteps() + 3) = u.transpose().row(getNumTimesteps() - 1);\n\n    // Apply convolutional filter to each timestep\n    for (int i = 0; i < getNumTimesteps(); ++i)\n    {\n      u.col(i) = (filter_coefficients * control_buffer.middleRows(i, 5)).transpose();\n    }\n  }\n\n  virtual void slideControlSequenceHelper(int steps, Eigen::Ref<control_trajectory> u)\n  {\n    for (int i = 0; i < getNumTimesteps(); ++i)\n    {\n      int ind = std::min(i + steps, getNumTimesteps() - 1);\n      u.col(i) = u.col(ind);\n      if (i + steps > getNumTimesteps() - 1)\n      {\n        u.col(i) = (u.col(ind).array() - model_->zero_control_.array()) * params_.slide_control_scale_.array() +\n                   model_->zero_control_.array();\n      }\n    }\n  }\n\n  virtual void saveControlHistoryHelper(int steps, const Eigen::Ref<const control_trajectory>& u_trajectory,\n                                        Eigen::Ref<Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2>> u_history)\n  {\n    if (steps == 1)\n    {  // We only moved one timestep\n      u_history.col(0) = u_history.col(1);\n      u_history.col(1) = u_trajectory.col(0);\n    }\n    else if (steps >= 2)\n    {  // We have moved more than one timestep, but our history size is still only 2\n      u_history.col(0) = u_trajectory.col(steps - 2);\n      u_history.col(1) = u_trajectory.col(steps - 1);\n    }\n  }\n\n  /**\n   * Reset Controls\n   */\n  virtual void resetControls(){\n   
 // TODO\n  };\n\n  virtual void computeStateTrajectoryHelper(Eigen::Ref<state_trajectory> result,\n                                            const Eigen::Ref<const state_array>& x0,\n                                            const Eigen::Ref<const control_trajectory>& u)\n  {\n    result.col(0) = x0;\n    state_array xdot;\n    state_array state, next_state;\n    output_array output;\n    model_->initializeDynamics(result.col(0), u.col(0), output, 0, getDt());\n    for (int i = 0; i < getNumTimesteps() - 1; ++i)\n    {\n      state = result.col(i);\n      control_array u_i = u.col(i);\n      model_->enforceConstraints(state, u_i);\n      model_->step(state, next_state, xdot, u_i, output, i, getDt());\n      result.col(i + 1) = next_state;\n    }\n  }\n\n  virtual void computeOutputTrajectoryHelper(Eigen::Ref<output_trajectory> output_result,\n                                             Eigen::Ref<state_trajectory> state_result,\n                                             const Eigen::Ref<const state_array>& x0,\n                                             const Eigen::Ref<const control_trajectory>& u)\n  {\n    state_result.col(0) = x0;\n    state_array xdot;\n    state_array state, next_state;\n    output_array output;\n    model_->initializeDynamics(state_result.col(0), u.col(0), output, 0, getDt());\n    output_result.col(0) = output;\n    for (int i = 0; i < getNumTimesteps() - 1; ++i)\n    {\n      state = state_result.col(i);\n      control_array u_i = u.col(i);\n      model_->enforceConstraints(state, u_i);\n      model_->step(state, next_state, xdot, u_i, output, i, getDt());\n      state_result.col(i + 1) = next_state;\n      output_result.col(i + 1) = output;\n    }\n  }\n\n  void setNumTimesteps(int num_timesteps)\n  {\n    // TODO fix the tracking controller as well\n    if ((num_timesteps <= MAX_TIMESTEPS) && (num_timesteps > 0))\n    {\n      params_.num_timesteps_ = num_timesteps;\n    }\n    else\n    {\n      params_.num_timesteps_ = 
MAX_TIMESTEPS;\n      printf(\"You must give a number of timesteps between [0, %d]\\n\", MAX_TIMESTEPS);\n    }\n    sampler_->setNumTimesteps(params_.num_timesteps_);\n  }\n\n  void setBaseline(float baseline, int index = 0)\n  {\n    cost_baseline_and_norm_[index].x = baseline;\n  };\n\n  void setNormalizer(float normalizer, int index = 0)\n  {\n    cost_baseline_and_norm_[index].y = normalizer;\n  };\n\n  int getNumTimesteps() const\n  {\n    return this->params_.num_timesteps_;\n  }\n\n  int getNormExpThreads() const\n  {\n    return this->params_.norm_exp_kernel_parallelization_;\n  }\n\n  /**\n   * updates the scaling factor of noise for sampling around the nominal trajectory\n   */\n  // void updateControlNoiseStdDev(const Eigen::Ref<const control_array>& sigma_u)\n  // {\n  //   params_.control_std_dev_ = sigma_u;\n  //   copyControlStdDevToDevice();\n  // }\n\n  void disableFeedbackController()\n  {\n    enable_feedback_ = false;\n  }\n\n  void setFeedbackParams(const TEMPLATED_FEEDBACK_PARAMS& fb_params)\n  {\n    fb_controller_->setParams(fb_params);\n  }\n\n  bool getFeedbackEnabled() const\n  {\n    return enable_feedback_;\n  }\n\n  float getPercentageSampledControlTrajectories() const\n  {\n    return perc_sampled_control_trajectories_;\n  }\n  /**\n   * Set the percentage of sample control trajectories to copy\n   * back from the GPU. 
Multiplier is an integer in case the nominal\n   * control trajectories also need to be saved.\n   */\n  void setPercentageSampledControlTrajectoriesHelper(float new_perc, int multiplier = 1)\n  {\n    perc_sampled_control_trajectories_ = new_perc;\n    sample_multiplier_ = multiplier;\n    resizeSampledControlTrajectories(perc_sampled_control_trajectories_, sample_multiplier_,\n                                     num_top_control_trajectories_);\n  }\n\n  void setTopNSampledControlTrajectoriesHelper(int new_top_num_samples)\n  {\n    num_top_control_trajectories_ = new_top_num_samples;\n    resizeSampledControlTrajectories(perc_sampled_control_trajectories_, sample_multiplier_,\n                                     num_top_control_trajectories_);\n  }\n\n  void resizeSampledControlTrajectories(float perc, int multiplier, int top_num);\n\n  int getNumberSampledTrajectories() const\n  {\n    return perc_sampled_control_trajectories_ * NUM_ROLLOUTS;\n  }\n\n  int getNumberTopControlTrajectories() const\n  {\n    return num_top_control_trajectories_;\n  }\n\n  int getTotalSampledTrajectories() const\n  {\n    return getNumberSampledTrajectories() + getNumberTopControlTrajectories();\n  }\n\n  void setSlideControlScale(const Eigen::Ref<const control_array>& slide_control_scale)\n  {\n    params_.slide_control_scale_ = slide_control_scale;\n  }\n\n  /**\n   * Return the most recent free energy calculation for the mean\n   */\n  MPPIFreeEnergyStatistics getFreeEnergyStatistics() const\n  {\n    return free_energy_statistics_;\n  }\n\n  std::vector<float> getSampledNoise();\n\n  /**\n   * Public data members\n   */\n  DYN_T* model_ = nullptr;\n  COST_T* cost_ = nullptr;\n  FB_T* fb_controller_ = nullptr;\n  SAMPLING_T* sampler_ = nullptr;\n  cudaStream_t stream_;\n  cudaStream_t vis_stream_;\n\n  float getDt() const\n  {\n    return params_.dt_;\n  }\n  void setDt(float dt)\n  {\n    params_.dt_ = dt;\n    if (fb_controller_)\n    {\n      fb_controller_->setDt(dt);\n    
}\n  }\n\n  float getLambda() const\n  {\n    return params_.lambda_;\n  }\n  void setLambda(float lambda)\n  {\n    params_.lambda_ = lambda;\n  }\n\n  float getAlpha() const\n  {\n    return params_.alpha_;\n  }\n  void setAlpha(float alpha)\n  {\n    params_.alpha_ = alpha;\n  }\n\n  PARAMS_T getParams() const\n  {\n    return params_;\n  }\n\n  const TEMPLATED_SAMPLING_PARAMS getSamplingParams() const\n  {\n    return sampler_->getParams();\n  }\n\n  void setSamplingParams(const TEMPLATED_SAMPLING_PARAMS& params, bool synchronize = true)\n  {\n    sampler_->setParams(params, synchronize);\n  }\n\n  void setParams(const PARAMS_T& p)\n  {\n    bool change_seed = p.seed_ != params_.seed_;\n    bool change_num_timesteps = p.num_timesteps_ != params_.num_timesteps_;\n    bool change_dt = p.dt_ != params_.dt_;\n    // bool change_std_dev = p.control_std_dev_ != params_.control_std_dev_;\n    params_ = p;\n    if (change_num_timesteps)\n    {\n      setNumTimesteps(p.num_timesteps_);\n    }\n    if (change_seed)\n    {\n      setSeedCUDARandomNumberGen(params_.seed_);\n    }\n    if (change_dt)\n    {\n      fb_controller_->setDt(p.dt_);\n    }\n  }\n\n  int getNumIters() const\n  {\n    return params_.num_iters_;\n  }\n  void setNumIters(int num_iter)\n  {\n    params_.num_iters_ = num_iter;\n  }\n\n  float getDebug() const\n  {\n    return debug_;\n  }\n  void setDebug(bool debug)\n  {\n    debug_ = debug;\n    setLogLevel(mppi::util::LOG_LEVEL::DEBUG);\n  }\n\n  int getKernelChoiceAsInt() const\n  {\n    return static_cast<int>(use_kernel_);\n  }\n\n  kernelType getKernelChoiceAsEnum() const\n  {\n    return use_kernel_;\n  }\n\n  int getNumKernelEvaluations() const\n  {\n    return num_kernel_evaluations_;\n  }\n\n  void setKernelChoice(const int& kernel_type)\n  {\n    use_kernel_ = static_cast<kernelType>(kernel_type);\n  }\n\n  void setKernelChoice(const kernelType& kernel_type)\n  {\n    use_kernel_ = kernel_type;\n  }\n\n  void setNumKernelEvaluations(const 
int& num_kernel_evaluations)\n  {\n    num_kernel_evaluations_ = num_kernel_evaluations;\n  }\n\n  void setCUDAStream(cudaStream_t stream);\n\n  void setLogLevel(const mppi::util::LOG_LEVEL& level)\n  {\n    logger_->setLogLevel(level);\n    model_->setLogLevel(level);\n    cost_->setLogLevel(level);\n    sampler_->setLogLevel(level);\n    fb_controller_->setLogLevel(level);\n  }\n\n  void setLogger(const mppi::util::MPPILoggerPtr& logger)\n  {\n    logger_ = logger;\n    model_->setLogger(logger);\n    cost_->setLogger(logger);\n    sampler_->setLogger(logger);\n    fb_controller_->setLogger(logger);\n  }\n\n  mppi::util::MPPILoggerPtr getLogger() const\n  {\n    return logger_;\n  }\n\n  mppi::util::MPPILoggerPtr getLogger()\n  {\n    return logger_;\n  }\n\nprotected:\n  // no default protected members\n  void deallocateCUDAMemory();\n\n  PARAMS_T params_;\n  mppi::util::MPPILoggerPtr logger_ = nullptr;\n\n  // TODO get raw pointers for different things\n  bool debug_ = false;\n  int num_kernel_evaluations_ = 10;  // number of kernel calls to do to determine which kernel to use\n  kernelType use_kernel_ = kernelType::USE_SPLIT_KERNELS;  // default is to use the split kernels\n\n  // Free energy variables\n  MPPIFreeEnergyStatistics free_energy_statistics_;\n\n  // float normalizer_;                             // Variable for the normalizing term from sampling.\n  // float baseline_ = 0;                           // Baseline cost of the system.\n  float perc_sampled_control_trajectories_ = 0;  // Percentage of sampled trajectories to return\n  int sample_multiplier_ = 1;                    // How many nominal states we are keeping track of\n  int num_top_control_trajectories_ = 0;         // Top n sampled trajectories to visualize\n  std::vector<float> top_n_costs_;\n\n  curandGenerator_t gen_;\n  // float* control_std_dev_d_;  // Array of size DYN_T::CONTROL_DIM\n  float* initial_state_d_;      // Array of size DYN_T::STATE_DIM * (2 if there is a nominal 
state)\n  float* vis_initial_state_d_;  // Array of size DYN_T::STATE_DIM * (2 if there is a nominal state)\n\n  Eigen::Matrix<float, DYN_T::CONTROL_DIM, 2> control_history_;\n\n  // one array of this size is allocated for each state we care about,\n  // so it can be the size*N for N nominal states\n  // [actual, nominal]\n  float* control_d_;           // Array of size DYN_T::CONTROL_DIM*NUM_TIMESTEPS*N\n  float* output_d_;            // Array of size DYN_T::OUTPUT_DIM*NUM_ROLLOUTS*N\n  float* trajectory_costs_d_;  // Array of size NUM_ROLLOUTS*N\n  // float* control_noise_d_;            // Array of size DYN_T::CONTROL_DIM*NUM_TIMESTEPS*NUM_ROLLOUTS*N\n  float2* cost_baseline_and_norm_d_;  // Array of size number of systems\n  control_trajectory control_ = control_trajectory::Zero();\n  state_trajectory state_ = state_trajectory::Zero();\n  output_trajectory output_ = output_trajectory::Zero();\n  sampled_cost_traj trajectory_costs_ = sampled_cost_traj::Zero();\n  std::vector<float2> cost_baseline_and_norm_ = { make_float2(0.0, 0.0) };\n  bool CUDA_mem_init_ = false;\n\n  bool sampled_states_CUDA_mem_init_ = false;  // cudaMalloc, cudaFree boolean\n  float* sampled_outputs_d_;                   // result of states that have been sampled from state trajectory kernel\n  float* sampled_noise_d_;                     // noise to be passed to the state trajectory kernel\n  float* sampled_costs_d_;       // result of cost that have been sampled from state and cost trajectory kernel\n  int* sampled_crash_status_d_;  // result of crash_status that have been sampled\n  std::vector<output_trajectory> sampled_trajectories_;  // sampled state trajectories from state trajectory kernel\n  std::vector<cost_trajectory> sampled_costs_;\n  std::vector<crash_status_trajectory> sampled_crash_status_;\n\n  // Propagated real state trajectory\n  state_trajectory propagated_feedback_state_trajectory_ = state_trajectory::Zero();\n\n  // tracking controller variables\n  bool 
enable_feedback_ = false;\n\n  // void copyControlStdDevToDevice(bool synchronize = true);\n\n  void copyNominalControlToDevice(bool synchronize = true);\n\n  /**\n   * Saves the sampled controls from the GPU back to the CPU\n   * Must be called after the rolloutKernel as that is when\n   * du_d becomes the sampled controls\n   */\n  void copySampledControlFromDevice(bool synchronize = true);\n\n  std::pair<int, float> findMinIndexAndValue(std::vector<int>& temp_list);\n  void copyTopControlFromDevice(bool synchronize = true);\n\n  void createAndSeedCUDARandomNumberGen();\n\n  void setSeedCUDARandomNumberGen(unsigned seed);\n\n  /**\n   * Allocates CUDA memory for actual states and nominal states if needed\n   * @param nominal_size if only actual this should be 0\n   */\n  void allocateCUDAMemoryHelper(int nominal_size = 0, bool allocate_double_noise = true);\n\n  // TODO all the copy to device functions to streamline process\nprivate:\n  // ======== MUST BE OVERWRITTEN =========\n  void allocateCUDAMemory()\n  {\n    allocateCUDAMemoryHelper();\n  };\n  /**\n   * TODO all copy to device and back functions implemented for specific controller\n   * When you write your own you must control when synchronize stream is called\n   */\n  // ======== END MUST BE OVERWRITTEN =====\n};\n\n#ifdef __CUDACC__\n#include \"controller.cu\"\n#endif\n\n#endif  // MPPIGENERIC_CONTROLLER_CUH\n"
  },
  {
    "path": "include/mppi/core/base_plant.hpp",
    "content": "/**\n * Created by Bogdan on 2/11/20.\n * Creates the API for interfacing with an MPPI controller\n * should define a compute_control based on state as well\n * as return timing info\n **/\n\n#ifndef BASE_PLANT_H_\n#define BASE_PLANT_H_\n\n// Double check if these are included in mppi_common.h\n#include <Eigen/Dense>\n#include <unistd.h>\n#include <vector>\n#include <array>\n#include <chrono>\n#include <atomic>\n#include <mutex>\n#include <thread>\n#include <memory>\n#include <mppi/controllers/controller.cuh>\n#include <mppi/utils/math_utils.h>\n\ntemplate <class CONTROLLER_T>\nclass BasePlant\n{\npublic:\n  using c_array = typename CONTROLLER_T::control_array;\n  using c_traj = typename CONTROLLER_T::control_trajectory;\n\n  using s_array = typename CONTROLLER_T::state_array;\n  using s_traj = typename CONTROLLER_T::state_trajectory;\n  // using K_traj = typename CONTROLLER_T::feedback_gain_trajectory;\n\n  using o_array = typename CONTROLLER_T::output_array;\n  using o_traj = typename CONTROLLER_T::output_trajectory;\n\n  using DYN_T = typename CONTROLLER_T::TEMPLATED_DYNAMICS;\n  using DYN_PARAMS_T = typename DYN_T::DYN_PARAMS_T;\n  using COST_T = typename CONTROLLER_T::TEMPLATED_COSTS;\n  using COST_PARAMS_T = typename COST_T::COST_PARAMS_T;\n  using TEMPLATED_CONTROLLER = CONTROLLER_T;\n  using CONTROLLER_PARAMS_T = typename CONTROLLER_T::TEMPLATED_PARAMS;\n  using SAMPLER_T = typename CONTROLLER_T::TEMPLATED_SAMPLING;\n  using SAMPLER_PARAMS_T = typename CONTROLLER_T::TEMPLATED_SAMPLING_PARAMS;\n\n  // Feedback related aliases\n  using FB_STATE_T = typename CONTROLLER_T::TEMPLATED_FEEDBACK::TEMPLATED_FEEDBACK_STATE;\n\n  typedef std::map<std::string, Eigen::VectorXf> buffer_trajectory;\n\nprotected:\n  std::mutex access_guard_;\n  mppi::util::MPPILoggerPtr logger_ = nullptr;\n\n  int hz_ = 10;  // Frequency of control publisher\n  int visualization_hz_ = 5;\n  bool debug_mode_ = false;\n\n  DYN_PARAMS_T dynamics_params_;\n  std::mutex 
dynamics_params_guard_;\n  COST_PARAMS_T cost_params_;\n  std::mutex cost_params_guard_;\n  CONTROLLER_PARAMS_T controller_params_;\n  std::mutex controller_params_guard_;\n  SAMPLER_PARAMS_T sampler_params_;\n  std::mutex sampler_params_guard_;\n\n  std::atomic<bool> has_new_dynamics_params_{ false };\n  std::atomic<bool> has_new_cost_params_{ false };\n  std::atomic<bool> has_new_controller_params_{ false };\n  std::atomic<bool> has_new_sampler_params_{ false };\n  std::atomic<bool> has_received_state_{ false };\n\n  // Values needed\n  s_array init_state_ = s_array::Zero();\n  c_array init_u_ = c_array::Zero();\n\n  // Values updated at every time step\n  s_array state_ = s_array::Zero();\n  c_array u_ = c_array::Zero();\n  // solution\n  s_traj state_traj_;\n  c_traj control_traj_;\n  o_traj output_traj_;\n\n  // values sometime updated\n  // TODO init to zero?\n  FB_STATE_T feedback_state_;\n\n  // from ROSHandle mppi_node\n  int optimization_stride_ = 1;\n  int last_optimization_stride_ = 0;\n\n  /**\n   * From before while loop\n   */\n  /**\n   * Robot Time: based off of the time stamps from the state estimator\n   * Wall Clock: always real time per the computer\n   */\n  // Robot Time: can scale with a simulation\n  std::atomic<double> last_used_state_update_time_{ 0.0 };  // time of the last state update that was used for\n                                                            // optimization\n  std::atomic<double> state_time_{ 0.0 };\n\n  // Wall Clock: always real time\n  double optimize_loop_duration_ = 0;  // duration of the entire controller run loop\n  double optimization_duration_ = 0;   // Most recent time it took to run MPPI iteration\n  double feedback_duration_ = 0;       // most recent time it took to run the feedback controller\n  double sleep_duration_ = 0;          // how long the most recent loop in runControlLoop slept\n  double avg_loop_time_ms_ = 0;        // Average time it takes to runControlLoop\n  double 
avg_optimize_time_ms_ = 0;    // Average time it takes to run the controller\n  double avg_feedback_time_ms_ = 0;    // Average time it takes to run the feedback controller\n  double avg_sleep_time_ms_ = 0;       // Average time the runControlLoop sleeps between calls\n\n  int num_iter_ = 0;  // number of calls to computeControl\n  /**\n   * represents the status of the vehicle\n   * 0: running normally\n   * 1: not activated or no state information\n   */\n  int status_ = 1;\n\npublic:\n  std::shared_ptr<CONTROLLER_T> controller_;\n\n  //  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  BasePlant(std::shared_ptr<CONTROLLER_T> controller, int hz, int optimization_stride)\n  {\n    controller_ = controller;\n    hz_ = hz;\n    optimization_stride_ = optimization_stride;\n    control_traj_ = c_traj::Zero();\n    state_traj_ = s_traj::Zero();\n    dynamics_params_ = controller->model_->getParams();\n    cost_params_ = controller_->cost_->getParams();\n    init_state_ = controller_->model_->getZeroState();\n    state_ = controller_->model_->getZeroState();\n    auto logger = std::make_shared<mppi::util::MPPILogger>();\n    setLogger(logger);\n  };\n  /**\n   * Destructor must be virtual so that children are properly\n   * destroyed when called from a BasePlant reference\n   */\n  virtual ~BasePlant() = default;\n\n  // ======== PURE VIRTUAL =========\n  /**\n   * applies the control to the system\n   * @param u\n   */\n  virtual void pubControl(const c_array& u) = 0;\n\n  /**\n   * publishes the target nominal state\n   * @param s\n   */\n  virtual void pubNominalState(const s_array& s) = 0;\n\n  virtual void pubFreeEnergyStatistics(MPPIFreeEnergyStatistics& fe_stats) = 0;\n\n  /**\n   * @brief Checks the system status.\n   * @return An integer specifying the status. 
0 means the system is operating\n   * nominally, 1 means something is wrong but no action needs to be taken,\n   * 2 means that the vehicle should stop immediately.\n   */\n  virtual int checkStatus() = 0;\n\n  /**\n   * @return the current time not necessarily real time\n   */\n  virtual double getCurrentTime() = 0;\n\n  /**\n   * @return the timestamp from the most recent position\n   */\n  virtual double getPoseTime() = 0;\n\n  /**\n   * gets the correct time for the state message that has been updated through updateState\n   * @return\n   */\n  virtual double getStateTime()\n  {\n    return state_time_;\n  }\n\n  // ======== PURE VIRTUAL END ====\n\n  s_traj getStateTraj()\n  {\n    return state_traj_;\n  }\n  c_traj getControlTraj()\n  {\n    return control_traj_;\n  }\n  FB_STATE_T getFeedbackState()\n  {\n    return feedback_state_;\n  }\n\n  /**\n   * Return the latest state received\n   * @return the latest state\n   */\n  virtual s_array getState()\n  {\n    std::lock_guard<std::mutex> lck(access_guard_);\n    return state_;\n  };\n\n  virtual void setState(const Eigen::Ref<const s_array>& state)\n  {\n    state_ = state;\n  }\n  virtual void setControl(const Eigen::Ref<const c_array>& u)\n  {\n    u_ = u;\n  }\n  virtual void setDebugMode(bool mode)\n  {\n    debug_mode_ = mode;\n  }\n\n  void resetStateTime()\n  {\n    last_used_state_update_time_ = -1;\n  };\n\n  double getAvgOptimizationTime() const\n  {\n    return avg_optimize_time_ms_;\n  };\n\n  int getTargetOptimizationStride()\n  {\n    return optimization_stride_;\n  };\n  int getLastOptimizationStride()\n  {\n    return last_optimization_stride_;\n  };\n  void setTargetOptimizationStride(int new_val)\n  {\n    optimization_stride_ = new_val;\n  }\n\n  int getHz() const\n  {\n    return hz_;\n  }\n\n  void setHz(int hz)\n  {\n    hz_ = hz;\n  }\n\n  int getVisualizationHz() const\n  {\n    return visualization_hz_;\n  }\n\n  void setVisualizationHz(int hz)\n  {\n    visualization_hz_ = hz;\n  
}\n\n  virtual buffer_trajectory getSmoothedBuffer(double time)\n  {\n    throw std::logic_error(\"Invalid dynamics with current plant, it requires the buffered plant\");\n  }\n\n  virtual void setSolution(const Eigen::Ref<const s_traj>& state_seq, const Eigen::Ref<const c_traj>& control_seq,\n                           const Eigen::Ref<const o_traj>& output_seq, const FB_STATE_T& fb_state, double timestamp)\n  {\n    last_used_state_update_time_ = timestamp;\n    std::lock_guard<std::mutex> guard(access_guard_);\n    state_traj_ = state_seq;\n    output_traj_ = output_seq;\n    control_traj_ = control_seq;\n    feedback_state_ = fb_state;\n    num_iter_++;\n  }\n\n  /**\n   * updates the state and publishes a new control\n   * @param state the most recent state from state estimator\n   * @param time the time of the most recent state from the state estimator\n   */\n  virtual void updateState(Eigen::Ref<s_array> state, double time)\n  {\n    // calculate and update all timing variables\n    double temp_last_state_update_time = last_used_state_update_time_;\n\n    double time_since_last_opt = time - temp_last_state_update_time;\n\n    state_ = state;\n    state_time_ = time;\n    has_received_state_ = true;\n\n    if (num_iter_ == 0)\n    {\n      // we have not optimized yet so no reason to publish controls\n      return;\n    }\n\n    // check if the requested time is in the calculated trajectory\n    bool t_within_trajectory =\n        time >= temp_last_state_update_time &&\n        time < temp_last_state_update_time + controller_->getDt() * controller_->getNumTimesteps();\n\n    // time_since_last_opt checks if we have a new state or not essentially, only publish control on new state time\n    if (time_since_last_opt > 0 && t_within_trajectory)\n    {\n      s_array target_nominal_state = this->controller_->interpolateState(state_traj_, time_since_last_opt);\n      pubControl(controller_->getCurrentControl(state, time_since_last_opt, target_nominal_state, 
control_traj_,\n                                                feedback_state_));\n      if (debug_mode_)\n      {\n        pubNominalState(target_nominal_state);\n      }\n    }\n  }\n\n  virtual bool hasNewDynamicsParams()\n  {\n    return has_new_dynamics_params_;\n  };\n  virtual bool hasNewCostParams()\n  {\n    return has_new_cost_params_;\n  };\n  virtual bool hasNewControllerParams()\n  {\n    return has_new_controller_params_;\n  };\n  virtual bool hasNewSamplerParams()\n  {\n    return has_new_sampler_params_;\n  };\n\n  virtual DYN_PARAMS_T getNewDynamicsParams(bool set_flag = false)\n  {\n    has_new_dynamics_params_ = set_flag;\n    return dynamics_params_;\n  }\n  virtual COST_PARAMS_T getNewCostParams(bool set_flag = false)\n  {\n    has_new_cost_params_ = set_flag;\n    return cost_params_;\n  }\n  virtual CONTROLLER_PARAMS_T getNewControllerParams(bool set_flag = false)\n  {\n    has_new_controller_params_ = set_flag;\n    return controller_params_;\n  }\n  virtual SAMPLER_PARAMS_T getNewSamplerParams(bool set_flag = false)\n  {\n    has_new_sampler_params_ = set_flag;\n    return sampler_params_;\n  }\n\n  virtual void setDynamicsParams(const DYN_PARAMS_T& params)\n  {\n    std::lock_guard<std::mutex> guard(dynamics_params_guard_);\n    dynamics_params_ = params;\n    has_new_dynamics_params_ = true;\n  }\n  virtual void setCostParams(const COST_PARAMS_T& params)\n  {\n    std::lock_guard<std::mutex> guard(cost_params_guard_);\n    cost_params_ = params;\n    has_new_cost_params_ = true;\n  }\n  virtual void setControllerParams(const CONTROLLER_PARAMS_T& params)\n  {\n    std::lock_guard<std::mutex> guard(controller_params_guard_);\n    controller_params_ = params;\n    has_new_controller_params_ = true;\n  }\n  virtual void setSamplerParams(const SAMPLER_PARAMS_T& params)\n  {\n    std::lock_guard<std::mutex> guard(sampler_params_guard_);\n    sampler_params_ = params;\n    has_new_sampler_params_ = true;\n  }\n\n  virtual void setLogger(const 
mppi::util::MPPILoggerPtr& logger)\n  {\n    logger_ = logger;\n    controller_->setLogger(logger);\n  }\n\n  virtual void setLogLevel(const mppi::util::LOG_LEVEL& level)\n  {\n    logger_->setLogLevel(level);\n    controller_->setLogLevel(level);\n  }\n\n  virtual mppi::util::MPPILoggerPtr getLogger()\n  {\n    return logger_;\n  }\n\n  virtual mppi::util::MPPILoggerPtr getLogger() const\n  {\n    return logger_;\n  }\n\n  /**\n   *\n   * @param controller\n   * @param state\n   * @return\n   */\n  virtual bool updateParameters()\n  {\n    bool changed = false;\n    // Update cost parameters\n    if (hasNewCostParams())\n    {\n      std::lock_guard<std::mutex> guard(cost_params_guard_);\n      changed = true;\n      COST_PARAMS_T cost_params = getNewCostParams();\n      controller_->cost_->setParams(cost_params);\n    }\n    // Update dynamics params\n    if (hasNewDynamicsParams())\n    {\n      std::lock_guard<std::mutex> guard(dynamics_params_guard_);\n      changed = true;\n      DYN_PARAMS_T dyn_params = getNewDynamicsParams();\n      controller_->model_->setParams(dyn_params);\n    }\n    // Update controller params\n    if (hasNewControllerParams())\n    {\n      std::lock_guard<std::mutex> guard(controller_params_guard_);\n      changed = true;\n      CONTROLLER_PARAMS_T controller_params = getNewControllerParams();\n      controller_->setParams(controller_params);\n    }\n    // Update sampler params\n    if (hasNewSamplerParams())\n    {\n      std::lock_guard<std::mutex> guard(sampler_params_guard_);\n      changed = true;\n      SAMPLER_PARAMS_T sampler_params = getNewSamplerParams();\n      controller_->setSamplingParams(sampler_params);\n    }\n    return changed;\n  }\n\n  /**\n   * two concepts of time\n   *    1. wall clock: how long it takes according to actual time to optimize\n   *    2. 
robot time: how long has elapsed from the perspective of the robot (per the state estimator)\n   * @param controller\n   * @param is_alive\n   * @return the millisecond number that the loop iteration started at\n   */\n  void runControlIteration(std::atomic<bool>* is_alive)\n  {\n    std::chrono::steady_clock::time_point loop_start = std::chrono::steady_clock::now();\n    if (!is_alive->load())\n    {\n      // break out if it should stop\n      return;\n    }\n\n    double temp_last_state_time = getStateTime();\n    double temp_last_used_state_update_time = last_used_state_update_time_;\n\n    // If it is the first iteration and we have received state, we should not wait for timestamps to differ\n    bool skip_first_loop = num_iter_ == 0 && has_received_state_;\n\n    // wait for a new state to compute control sequence from\n    while (temp_last_used_state_update_time == temp_last_state_time && !skip_first_loop && is_alive->load())\n    {\n      usleep(50);\n      temp_last_state_time = getStateTime();\n      // In case when runControlIteration is ran before getting state and state time is specifically 0\n      skip_first_loop = num_iter_ == 0 && has_received_state_;\n    }\n    if (!is_alive->load())\n    {\n      // if we have been stopped, exit\n      return;\n    }\n\n    // updates the parameters if needed to ensure most up to date information\n    updateParameters();\n\n    // gets the most recent possible state and time locked together so that we can ensure they are synced and up to date\n    this->access_guard_.lock();\n    s_array state = state_;\n    temp_last_state_time = state_time_;\n    this->access_guard_.unlock();\n\n    // Check if there are nans in the initial state and don't continue\n    if (!isfinite(state.sum()))\n    {\n      return;\n    }\n\n    if (this->controller_->model_->checkRequiresBuffer())\n    {\n      std::lock_guard<std::mutex> guard(dynamics_params_guard_);\n      
this->controller_->model_->updateFromBuffer(this->getSmoothedBuffer(temp_last_state_time));\n      HANDLE_ERROR(cudaStreamSynchronize(this->controller_->model_->stream_));\n    }\n\n    // Check the robots status for this optimization\n    int temp_status = checkStatus();\n\n    // calculate how much we should slide the control sequence\n    double dt = temp_last_state_time - temp_last_used_state_update_time;\n    if (num_iter_ == 0)\n    {  //\n      // should only happen on the first iteration\n      dt = 0;\n      last_optimization_stride_ = 0;\n    }\n    else\n    {\n      last_optimization_stride_ = std::max(int(round(dt / this->controller_->getDt())), optimization_stride_);\n    }\n    // printf(\"calc optimization stride %f %f %f %d\\n\", dt, temp_last_used_state_update_time, temp_last_state_time,\n    // last_optimization_stride_);\n    // determine how long we should stride based off of robot time\n\n    if (last_optimization_stride_ > 0 && last_optimization_stride_ < controller_->getNumTimesteps())\n    {\n      controller_->updateImportanceSamplingControl(state, last_optimization_stride_);\n      controller_->slideControlSequence(last_optimization_stride_);\n    }\n\n    // Compute a new control sequence\n    std::chrono::steady_clock::time_point optimization_start = std::chrono::steady_clock::now();\n    controller_->computeControl(state, last_optimization_stride_);  // Compute the nominal control sequence\n\n    MPPIFreeEnergyStatistics fe_stats = controller_->getFreeEnergyStatistics();\n\n    c_traj control_traj = controller_->getControlSeq();\n    if (!control_traj.allFinite())\n    {\n      std::cerr << \"ERROR: Nan in control inside plant\" << std::endl;\n      std::cerr << control_traj << std::endl;\n      throw std::runtime_error(\"Control Trajectory inside plant has a NaN\");\n    }\n    s_traj state_traj = controller_->getTargetStateSeq();\n    if (!state_traj.allFinite())\n    {\n      std::cerr << \"ERROR: Nan in state inside plant\" << 
std::endl;\n      std::cerr << state_traj << std::endl;\n      throw std::runtime_error(\"State Trajectory inside plant has a NaN\");\n    }\n    o_traj output_traj = controller_->getTargetOutputSeq();\n    if (!output_traj.allFinite())\n    {\n      std::cerr << \"ERROR: Nan in output inside plant\" << std::endl;\n      std::cerr << output_traj << std::endl;\n      throw std::runtime_error(\"Output Trajectory inside plant has a NaN\");\n    }\n    optimization_duration_ = mppi::math::timeDiffms(std::chrono::steady_clock::now(), optimization_start);\n\n    std::chrono::steady_clock::time_point feedback_start = std::chrono::steady_clock::now();\n    // TODO make sure this is zero by default\n    FB_STATE_T feedback_state;\n    if (controller_->getFeedbackEnabled())\n    {\n      controller_->computeFeedback(state);\n      feedback_state = controller_->getFeedbackState();\n    }\n    feedback_duration_ = mppi::math::timeDiffms(std::chrono::steady_clock::now(), feedback_start);\n\n    // Set the updated solution for execution\n    setSolution(state_traj, control_traj, output_traj, feedback_state, temp_last_state_time);\n    status_ = temp_status;\n    pubFreeEnergyStatistics(fe_stats);\n\n    // calculate the propagated feedback trajectory\n    controller_->computeFeedbackPropagatedStateSeq();\n\n    // Update the average loop time data\n    double prev_iter_percent = (num_iter_ - 1.0) / num_iter_;\n\n    avg_optimize_time_ms_ = prev_iter_percent * avg_optimize_time_ms_ + optimization_duration_ / num_iter_;\n    avg_feedback_time_ms_ = prev_iter_percent * avg_feedback_time_ms_ + feedback_duration_ / num_iter_;\n\n    optimize_loop_duration_ = mppi::math::timeDiffms(std::chrono::steady_clock::now(), loop_start);\n    avg_loop_time_ms_ = prev_iter_percent * avg_loop_time_ms_ + optimize_loop_duration_ / num_iter_;\n  }\n\n  void runControlLoop(std::atomic<bool>* is_alive)\n  {\n    // Initial condition of the robot\n    state_ = init_state_;\n\n    // Initial control 
value\n    u_ = init_u_;\n\n    controller_->resetControls();\n\n    // Start the control loop.\n    while (is_alive->load())\n    {\n      double wait_until_real_time = getCurrentTime() + (1.0 / hz_) * optimization_stride_;\n      runControlIteration(is_alive);\n\n      double wait_until_state_time = last_used_state_update_time_ + (1.0 / hz_) * optimization_stride_;\n      // printf(\"last used state update time %f last_stride = %d\\n\", last_used_state_update_time_,\n      // last_optimization_stride_); printf(\"wait until time %f current time %f\\n\", wait_until_time, getCurrentTime());\n\n      std::chrono::steady_clock::time_point sleep_start = std::chrono::steady_clock::now();\n      while (is_alive->load() && wait_until_state_time > getStateTime())\n      {\n        updateParameters();\n        usleep(50);\n      }\n      sleep_duration_ = mppi::math::timeDiffms(std::chrono::steady_clock::now(), sleep_start);\n      double prev_iter_percent = (num_iter_ - 1.0) / num_iter_;\n      avg_sleep_time_ms_ = prev_iter_percent * avg_sleep_time_ms_ + sleep_duration_ / num_iter_;\n      // printf(\"sleep: %f loop_time %f at time %f\", sleep_duration_, optimize_loop_duration_, getCurrentTime());\n      // std::cout << \" loop ended at: \" <<\n      // std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count()\n      // << std::endl;\n    }\n  }\n};\n\n#endif  // BASE_PLANT_H_\n"
  },
  {
    "path": "include/mppi/core/buffer.hpp",
    "content": "/**\n * @file buffer.hpp\n * @brief Buffer of states and controls\n * @author Bogdan Vlahov\n * @version 0.0.1\n * @date 2024-05-04\n */\n#pragma once\n\n#include <mppi/dynamics/dynamics.cuh>\n\n#include <algorithm>\n#include <functional>\n#include <mutex>\n#include <list>\n\ntemplate <class T>\nstruct BufferMessage\n{\n  double time = -1;\n  T data;\n  bool required = true;\n\n  BufferMessage(double time, T data)\n  {\n    this->time = time;\n    this->data = data;\n  }\n};\n\ntemplate <class DYN_T>\nclass Buffer\n{\npublic:\n  using buffer_trajectory = typename DYN_T::buffer_trajectory;\n  using c_array = typename DYN_T::control_array;\n\n  void updateExtraValue(const std::string& name, float value, double time)\n  {\n    std::lock_guard<std::mutex> guard(this->buffer_guard_);\n    if (prev_extra_.find(name) == prev_extra_.end())\n    {\n      prev_extra_.emplace(std::make_pair(name, std::list<BufferMessage<float>>()));\n    }\n    insertionSort(prev_extra_[name], time, value);\n  }\n\n  void updateControls(c_array& control, double time)\n  {\n    std::lock_guard<std::mutex> guard(this->buffer_guard_);\n    insertionSort<c_array>(prev_controls_, time, control);\n  }\n\n  template <class T>\n  static void insertionSort(std::list<BufferMessage<T>>& list, double time, T val)\n  {\n    if (list.empty())\n    {\n      list.push_back(BufferMessage<T>(time, val));\n      return;\n    }\n    for (auto it = list.rbegin(); it != list.rend(); it++)\n    {\n      if (it->time < time)\n      {\n        list.insert(it.base(), BufferMessage<T>(time, val));\n        return;\n      }\n    }\n    list.push_front(BufferMessage<T>(time, val));\n  }\n\n  template <class T>\n  static void cleanList(std::list<BufferMessage<T>>& list, double time, double buffer_time_horizon)\n  {\n    if (list.empty())\n    {\n      return;\n    }\n    auto it = list.begin();\n    // iterate until the time is greater than\n    for (; (it != list.end() && it->time < time - 
buffer_time_horizon); it++)\n    {\n    }\n\n    list.erase(list.begin(), it);\n  }\n\n  static Eigen::Quaternionf interp(std::list<BufferMessage<Eigen::Quaternionf>>& list, double time)\n  {\n    if (list.empty())\n    {\n      return Eigen::Quaternionf::Identity();\n    }\n    auto it = list.rbegin();\n    // iterate until the time is greater than\n    for (; (it != list.rend() && it->time > time); it++)\n    {\n    }\n    if (it == list.rbegin())\n    {\n      return it->data;\n    }\n    // if we search entire list then use the first index\n    auto it_new = std::prev(it);\n    if (it == list.rend())\n    {\n      return list.begin()->data;\n    }\n\n    double diff = it_new->time - it->time;\n\n    double alpha = (time - it->time) / diff;\n    // ensure we don't check beyond, i.e. use closest when interpolating outside bounds\n    alpha = std::min(std::max(alpha, 0.0), 1.0);\n\n    return it->data.slerp(alpha, it_new->data);\n  }\n\n  template <class T>\n  static T interp(std::list<BufferMessage<T>>& list, double time)\n  {\n    if (list.empty())\n    {\n      return T();\n    }\n    auto it = list.rbegin();\n    // iterate until the time is greater than\n    for (; (it != list.rend() && it->time > time); it++)\n    {\n    }\n    if (it == list.rbegin())\n    {\n      return it->data;\n    }\n    // if we search entire list then use the first index\n    auto it_new = std::prev(it);\n    if (it == list.rend())\n    {\n      return list.begin()->data;\n    }\n\n    double diff = it_new->time - it->time;\n\n    double alpha = (time - it->time) / diff;\n    // ensure we don't check beyond, i.e. 
use closest when interpolating outside bounds\n    alpha = std::min(std::max(alpha, 0.0), 1.0);\n\n    return (1 - alpha) * it->data + alpha * it_new->data;\n  }\n\n  void updateOdometry(Eigen::Vector3f& pos, Eigen::Quaternionf& quat, Eigen::Vector3f& vel, Eigen::Vector3f& omega,\n                      double time)\n  {\n    this->buffer_guard_.lock();\n    // inserts odometry into buffers using insertion sort\n    insertionSort(prev_position_, time, pos);\n    insertionSort(prev_quaternion_, time, quat);\n    insertionSort(prev_velocity_, time, vel);\n    insertionSort(prev_omega_, time, omega);\n    this->buffer_guard_.unlock();\n  }\n\n  std::map<std::string, float> getInterpState(double time)\n  {\n    std::lock_guard<std::mutex> guard(this->buffer_guard_);\n    std::map<std::string, float> result;\n    if (prev_position_.empty())\n    {\n      return result;\n    }\n\n    Eigen::Vector3f interp_pos = interp(prev_position_, time);\n    result[\"POS_X\"] = interp_pos.x();\n    result[\"POS_Y\"] = interp_pos.y();\n    result[\"POS_Z\"] = interp_pos.z();\n    Eigen::Quaternionf interp_quat = interp(prev_quaternion_, time);\n    result[\"Q_W\"] = interp_quat.w();\n    result[\"Q_X\"] = interp_quat.x();\n    result[\"Q_Y\"] = interp_quat.y();\n    result[\"Q_Z\"] = interp_quat.z();\n    float roll, pitch, yaw;\n    mppi::math::Quat2EulerNWU(interp_quat, roll, pitch, yaw);\n    result[\"ROLL\"] = roll;\n    result[\"PITCH\"] = pitch;\n    result[\"YAW\"] = yaw;\n    Eigen::Vector3f interp_vel = interp(prev_velocity_, time);\n    result[\"VEL_X\"] = interp_vel.x();\n    result[\"VEL_Y\"] = interp_vel.y();\n    result[\"VEL_Z\"] = interp_vel.z();\n    Eigen::Vector3f interp_omega = interp(prev_omega_, time);\n    result[\"OMEGA_X\"] = interp_omega.x();\n    result[\"OMEGA_Y\"] = interp_omega.y();\n    result[\"OMEGA_Z\"] = interp_omega.z();\n\n    auto controls_interp = interp(prev_controls_, time);\n    for (int i = 0; i < controls_interp.rows(); i++)\n    {\n      
result[\"CONTROL_\" + std::to_string(i)] = controls_interp(i);\n    }\n\n    for (auto& it : prev_extra_)\n    {\n      result[it.first] = interp(it.second, time);\n    }\n    return result;\n  }\n\n  buffer_trajectory getSmoothedBuffer(double latest_time, double buffer_tau, double buffer_dt)\n  {\n    // does the latest state to make sure we have valid values\n    std::map<std::string, float> start_vals = getInterpState(latest_time);\n    buffer_trajectory result;\n\n    // if not enough data return empty message\n    this->buffer_guard_.lock();\n    float time_diff = prev_position_.rbegin()->time - prev_position_.begin()->time;\n    if (time_diff < buffer_tau)\n    {\n      std::cout << \"not enough time for buffer, returning early\" << prev_position_.rbegin()->time << \" - \"\n                << prev_position_.begin()->time << \" = \" << time_diff << \" < \" << buffer_tau << std::endl;\n      this->buffer_guard_.unlock();\n      return result;\n    }\n    this->buffer_guard_.unlock();\n\n    int steps = buffer_tau / buffer_dt + 1;\n\n    for (const auto& start_val : start_vals)\n    {\n      result[start_val.first] = Eigen::VectorXf(steps);\n      result[start_val.first](steps - 1) = start_val.second;\n    }\n\n    // goes from [t - tau, t)\n    for (int t = 0; t <= steps - 1; t++)\n    {\n      // get query time\n      double query_time = latest_time - (steps - 1) * buffer_dt + t * buffer_dt;\n      std::map<std::string, float> interp_vals = getInterpState(query_time);\n\n      // interpolate values\n      for (auto& interp_val : interp_vals)\n      {\n        result[interp_val.first](t) = interp_val.second;\n      }\n    }\n\n    return result;\n  }\n\n  void cleanBuffers(double time, double horizon)\n  {\n    std::lock_guard<std::mutex> guard(this->buffer_guard_);\n    cleanList(prev_position_, time, horizon);\n    cleanList(prev_quaternion_, time, horizon);\n    cleanList(prev_velocity_, time, horizon);\n    cleanList(prev_omega_, time, horizon);\n    
cleanList(prev_controls_, time, horizon);\n    for (auto& it : prev_extra_)\n    {\n      cleanList(it.second, time, horizon);\n    }\n  }\n\n  void clearBuffers()\n  {\n    std::lock_guard<std::mutex> guard(this->buffer_guard_);\n    prev_position_.clear();\n    prev_quaternion_.clear();\n    prev_velocity_.clear();\n    prev_omega_.clear();\n    prev_controls_.clear();\n    prev_extra_.clear();\n  }\n\n  std::list<BufferMessage<Eigen::Vector3f>> getPrevPositionList()\n  {\n    return prev_position_;\n  }\n\n  std::list<BufferMessage<Eigen::Quaternionf>> getPrevQuaternionList()\n  {\n    return prev_quaternion_;\n  }\n\n  std::list<BufferMessage<Eigen::Vector3f>> getPrevVelocityList()\n  {\n    return prev_velocity_;\n  }\n\n  std::list<BufferMessage<Eigen::Vector3f>> getPrevOmegaList()\n  {\n    return prev_omega_;\n  }\n\n  std::list<BufferMessage<c_array>> getPrevControlList()\n  {\n    return prev_controls_;\n  }\n\n  std::map<std::string, std::list<BufferMessage<float>>> getPrevExtraList()\n  {\n    return prev_extra_;\n  }\n\n  double getLatestOdomTime()\n  {\n    if (prev_position_.empty())\n    {\n      return 0;\n    }\n    return prev_position_.rbegin()->time;\n  }\n\n  double getOldestOdomTime()\n  {\n    if (prev_position_.empty())\n    {\n      return 0;\n    }\n    return prev_position_.begin()->time;\n  }\n\nprivate:\n  std::mutex buffer_guard_;\n\n  std::list<BufferMessage<Eigen::Vector3f>> prev_position_;\n  std::list<BufferMessage<Eigen::Quaternionf>> prev_quaternion_;\n  std::list<BufferMessage<Eigen::Vector3f>> prev_velocity_;\n  std::list<BufferMessage<Eigen::Vector3f>> prev_omega_;\n  std::list<BufferMessage<c_array>> prev_controls_;\n  std::map<std::string, std::list<BufferMessage<float>>> prev_extra_;\n};\n"
  },
  {
    "path": "include/mppi/core/buffered_plant.hpp",
    "content": "//\n// Created by jason on 7/20/21.\n//\n\n#ifndef MPPIGENERIC_BUFFERED_PLANT_H\n#define MPPIGENERIC_BUFFERED_PLANT_H\n\n#include \"buffer.hpp\"\n#include \"base_plant.hpp\"\n\ntemplate <class CONTROLLER_T>\nclass BufferedPlant : public BasePlant<CONTROLLER_T>\n{\npublic:\n  using s_array = typename BasePlant<CONTROLLER_T>::s_array;\n  using c_array = typename BasePlant<CONTROLLER_T>::c_array;\n\n  using buffer_trajectory = typename BasePlant<CONTROLLER_T>::buffer_trajectory;\n\n  BufferedPlant(std::shared_ptr<CONTROLLER_T> controller, int hz, int opt_stide)\n    : BasePlant<CONTROLLER_T>(controller, hz, opt_stide)\n  {\n  }\n\n  void updateExtraValue(const std::string& name, float value, double time)\n  {\n    buffer_.updateExtraValue(name, value, time);\n  }\n\n  void updateControls(c_array& control, double time)\n  {\n    buffer_.updateControls(control, time);\n  }\n\n  void updateOdometry(Eigen::Vector3f& pos, Eigen::Quaternionf& quat, Eigen::Vector3f& vel, Eigen::Vector3f& omega,\n                      double time)\n  {\n    buffer_.updateOdometry(pos, quat, vel, omega, time);\n\n    /**\n     * Uses the most recent odometry information\n     * If any other sources are more delayed it uses the most recent value\n     * If other sources are more recent it gets interpolated to odom time\n     */\n    std::map<std::string, float> smoothed = buffer_.getInterpState(time);\n    s_array state = this->controller_->model_->stateFromMap(smoothed);\n    BasePlant<CONTROLLER_T>::updateState(state, time);\n  }\n\n  std::map<std::string, float> getInterpState(double time)\n  {\n    return buffer_.getInterpState(time);\n  }\n\n  bool updateParameters()\n  {\n    // removes extra values from the buffer\n    double time = this->getStateTime();\n    buffer_.cleanBuffers(time, buffer_time_horizon_);\n    return BasePlant<CONTROLLER_T>::updateParameters();\n  }\n\n  buffer_trajectory getSmoothedBuffer(double latest_time)\n  {\n    return 
buffer_.getSmoothedBuffer(latest_time, buffer_tau_, buffer_dt_);\n  }\n\n  void cleanBuffers(double time)\n  {\n    buffer_.cleanBuffers(time, buffer_time_horizon_);\n  }\n\n  void clearBuffers()\n  {\n    buffer_.clearBuffers();\n  }\n\n  double getBufferDt() const\n  {\n    return buffer_dt_;\n  }\n\n  void setBufferDt(const double buff_dt)\n  {\n    buffer_dt_ = buff_dt;\n  }\n\nprotected:\n  Buffer<typename CONTROLLER_T::TEMPLATED_DYNAMICS> buffer_;\n\n  double buffer_time_horizon_ = 2.0;  // how long to store values in the buffer\n  double buffer_tau_ = 1.0;           // how in history to create well sampled positions from\n  double buffer_dt_ = 0.02;           // the spacing between well sampled buffer positions\n};\n\n#endif  // MPPIGENERIC_BUFFERED_PLANT_H\n"
  },
  {
    "path": "include/mppi/core/mppi_common.cu",
    "content": "#include <mppi/core/mppi_common.cuh>\n#include <curand.h>\n#include <mppi/utils/gpu_err_chk.cuh>\n#include <mppi/utils/math_utils.h>\n#include <mppi/utils/cuda_math_utils.cuh>\n\n// CUDA barriers were first implemented in CUDA 11\n#if defined(CMAKE_USE_CUDA_BARRIERS) && defined(CUDART_VERSION) && CUDART_VERSION > 11000\n#include <cuda/barrier>\nusing barrier = cuda::barrier<cuda::thread_scope_block>;\n\n// Turn on/off various CUDA barriers from CMake configuration\n#ifdef CMAKE_USE_CUDA_BARRIERS_DYN\n#define USE_CUDA_BARRIERS_DYN\n#endif\n#ifdef CMAKE_USE_CUDA_BARRIERS_COST\n#define USE_CUDA_BARRIERS_COST\n#endif\n#ifdef CMAKE_USE_CUDA_BARRIERS_ROLLOUT\n#define USE_CUDA_BARRIERS_ROLLOUT\n#endif\n#endif\n\n#include <cooperative_groups.h>\nnamespace cg = cooperative_groups;\nnamespace mp1 = mppi::p1;\n\nnamespace mppi\n{\nnamespace kernels\n{\n/*******************************************************************************************************************\n * Kernel Functions\n *******************************************************************************************************************/\ntemplate <class DYN_T, class COST_T, class SAMPLING_T>\n__global__ void rolloutKernel(DYN_T* __restrict__ dynamics, SAMPLING_T* __restrict__ sampling,\n                              COST_T* __restrict__ costs, float dt, const int num_timesteps, const int num_rollouts,\n                              const float* __restrict__ init_x_d, float lambda, float alpha,\n                              float* __restrict__ trajectory_costs_d)\n{\n  // Get thread and block id\n  const int thread_idx = threadIdx.x;\n  const int thread_idy = threadIdx.y;\n  const int thread_idz = threadIdx.z;\n  const int block_idx = blockIdx.x;\n  const int global_idx = blockDim.x * block_idx + thread_idx;\n  const int shared_idx = blockDim.x * thread_idz + thread_idx;\n  const int distribution_idx = threadIdx.z;\n  const int distribution_dim = blockDim.z;\n  const int sample_dim = 
blockDim.x;\n  // Ensure that there is enough room for the SHARED_MEM_REQUEST_GRD_BYTES and SHARED_MEM_REQUEST_BLK_BYTES portions to\n  // be aligned to the float4 boundary.\n  const int size_of_theta_s_bytes = calcClassSharedMemSize(dynamics, blockDim);\n  const int size_of_theta_d_bytes = calcClassSharedMemSize(sampling, blockDim);\n  const int size_of_theta_c_bytes = calcClassSharedMemSize(costs, blockDim);\n\n  // Create shared state and control arrays\n  extern __shared__ float entire_buffer[];\n\n  float* x_shared = entire_buffer;\n  float* x_next_shared = &x_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* y_shared = &x_next_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* x_dot_shared = &y_shared[math::nearest_multiple_4(sample_dim * DYN_T::OUTPUT_DIM * distribution_dim)];\n  float* u_shared = &x_dot_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* theta_s_shared = &u_shared[math::nearest_multiple_4(sample_dim * DYN_T::CONTROL_DIM * distribution_dim)];\n  float* theta_d_shared = &theta_s_shared[size_of_theta_s_bytes / sizeof(float)];\n  float* theta_c_shared = &theta_d_shared[size_of_theta_d_bytes / sizeof(float)];\n  float* running_cost_shared = &theta_c_shared[size_of_theta_c_bytes / sizeof(float)];\n  int* crash_status_shared = (int*)&running_cost_shared[math::nearest_multiple_4(blockDim.x * blockDim.y * blockDim.z)];\n\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  barrier* barrier_shared = (barrier*)&crash_status_shared[math::nearest_multiple_4(sample_dim * distribution_dim)];\n#endif\n\n  // Create local state, state dot and controls\n  int running_cost_index = thread_idx + blockDim.x * (thread_idy + blockDim.y * thread_idz);\n  float* x = &x_shared[shared_idx * DYN_T::STATE_DIM];\n  float* x_next = &x_next_shared[shared_idx * DYN_T::STATE_DIM];\n  float* x_temp;\n  float* xdot = &x_dot_shared[shared_idx * DYN_T::STATE_DIM];\n  
float* u = &u_shared[shared_idx * DYN_T::CONTROL_DIM];\n  float* y = &y_shared[shared_idx * DYN_T::OUTPUT_DIM];\n  float* running_cost = &running_cost_shared[running_cost_index];\n  running_cost[0] = 0.0f;\n  int* crash_status = &crash_status_shared[shared_idx];\n  crash_status[0] = 0;  // We have not crashed yet as of the first trajectory.\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  barrier* bar = &barrier_shared[shared_idx];\n  if (thread_idy == 0)\n  {\n    init(bar, blockDim.y);\n  }\n#endif\n\n  // Load global array to shared array\n  loadGlobalToShared<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM>(num_rollouts, blockDim.y, global_idx, thread_idy, thread_idz,\n                                                           init_x_d, x, xdot, u);\n  __syncthreads();\n\n  /*<----Start of simulation loop-----> */\n  dynamics->initializeDynamics(x, u, y, theta_s_shared, 0.0f, dt);\n  sampling->initializeDistributions(y, 0.0f, dt, theta_d_shared);\n  costs->initializeCosts(y, u, theta_c_shared, 0.0f, dt);\n  __syncthreads();\n  for (int t = 0; t < num_timesteps; t++)\n  {\n    // Load noise trajectories scaled by the exploration factor\n    sampling->readControlSample(global_idx, t, distribution_idx, u, theta_d_shared, blockDim.y, thread_idy, y);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // applies constraints as defined in dynamics.cuh see specific dynamics class for what happens here\n    // usually just control clamping\n    dynamics->enforceConstraints(x, u);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    // Copy control constraints back to global memory\n    sampling->writeControlSample(global_idx, t, distribution_idx, u, theta_d_shared, blockDim.y, thread_idy, y);\n\n    // Increment states\n    dynamics->step(x, x_next, xdot, u, y, theta_s_shared, t, dt);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n   
 running_cost[0] +=\n        costs->computeRunningCost(y, u, t, theta_c_shared, crash_status) +\n        sampling->computeLikelihoodRatioCost(u, theta_d_shared, global_idx, t, distribution_idx, lambda, alpha);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    x_temp = x;\n    x = x_next;\n    x_next = x_temp;\n  }\n\n  // Add all costs together\n  running_cost = &running_cost_shared[thread_idx + blockDim.x * blockDim.y * thread_idz];\n  __syncthreads();\n  costArrayReduction(running_cost, blockDim.y, thread_idy, blockDim.y, thread_idy == 0, blockDim.x);\n  // Compute terminal cost and the final cost for each thread\n  computeAndSaveCost(num_rollouts, num_timesteps, global_idx, costs, y, running_cost[0] / (num_timesteps),\n                     theta_c_shared, trajectory_costs_d);\n}\n\ntemplate <class COST_T, class SAMPLING_T, bool COALESCE>\n__global__ void rolloutCostKernel(COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling, float dt,\n                                  const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                                  const float* __restrict__ y_d, float* __restrict__ trajectory_costs_d)\n{\n  // Get thread and block id\n  const int thread_idx = threadIdx.x;\n  const int thread_idy = threadIdx.y;\n  const int thread_idz = threadIdx.z;\n  const int global_idx = blockIdx.x;\n  const int distribution_idx = threadIdx.z;\n  const int size_of_theta_d_bytes = calcClassSharedMemSize(sampling, blockDim);\n  const int size_of_theta_c_bytes = calcClassSharedMemSize(costs, blockDim);\n\n  int running_cost_index = thread_idx + blockDim.x * (thread_idy + blockDim.y * thread_idz);\n  // Create shared state and control arrays\n  extern __shared__ float entire_buffer[];\n  float* y_shared = entire_buffer;\n  float* u_shared = &y_shared[math::nearest_multiple_4(blockDim.x * blockDim.z * COST_T::OUTPUT_DIM)];\n  float* running_cost_shared = 
&u_shared[math::nearest_multiple_4(blockDim.x * blockDim.z * COST_T::CONTROL_DIM)];\n  int* crash_status_shared = (int*)&running_cost_shared[math::nearest_multiple_4(blockDim.x * blockDim.y * blockDim.z)];\n  float* theta_c = (float*)&crash_status_shared[math::nearest_multiple_4(blockDim.x * blockDim.z)];\n  float* theta_d = &theta_c[size_of_theta_c_bytes / sizeof(float)];\n#ifdef USE_CUDA_BARRIERS_COST\n  barrier* barrier_shared = (barrier*)&theta_d[size_of_theta_d_bytes / sizeof(float)];\n#endif\n\n  // Create local state, state dot and controls\n  float* y;\n  float* u;\n  int* crash_status;\n\n  // Initialize running cost and total cost\n  float* running_cost;\n  int sample_time_offset = 0;\n\n  // Load global array to shared array\n  y = &y_shared[(blockDim.x * thread_idz + thread_idx) * COST_T::OUTPUT_DIM];\n  u = &u_shared[(blockDim.x * thread_idz + thread_idx) * COST_T::CONTROL_DIM];\n  crash_status = &crash_status_shared[thread_idz * blockDim.x + thread_idx];\n  crash_status[0] = 0;  // We have not crashed yet as of the first trajectory.\n  running_cost = &running_cost_shared[running_cost_index];\n  running_cost[0] = 0.0f;\n#ifdef USE_CUDA_BARRIERS_COST\n  barrier* bar = &barrier_shared[(blockDim.x * thread_idz + thread_idx)];\n  if (thread_idy == 0)\n  {\n    init(bar, blockDim.y);\n  }\n#endif\n\n  /*<----Start of simulation loop-----> */\n#ifdef USE_COST_WITH_OFF_NUM_TIMESTEPS\n  const int max_time_iters = ceilf((float)(num_timesteps - 2) / blockDim.x);\n#else\n  const int max_time_iters = ceilf((float)num_timesteps / blockDim.x);\n#endif\n  costs->initializeCosts(y, u, theta_c, 0.0f, dt);\n  sampling->initializeDistributions(y, 0.0f, dt, theta_d);\n  __syncthreads();\n  for (int time_iter = 0; time_iter < max_time_iters; ++time_iter)\n  {\n    int t = thread_idx + time_iter * blockDim.x;\n    if (COALESCE)\n    {  // Fill entire shared mem sequentially using sequential threads_idx\n      int amount_to_fill = (time_iter + 1) * blockDim.x > num_timesteps 
? num_timesteps % blockDim.x : blockDim.x;\n      mp1::loadArrayParallel<mp1::Parallel1Dir::THREAD_XY>(\n          y_shared, blockDim.x * thread_idz * COST_T::OUTPUT_DIM, y_d,\n          ((num_rollouts * thread_idz + global_idx) * num_timesteps + time_iter * blockDim.x) * COST_T::OUTPUT_DIM,\n          COST_T::OUTPUT_DIM * amount_to_fill);\n    }\n    else if (t < num_timesteps)\n    {  // t = num_timesteps is the terminal state for outside this for-loop\n      sample_time_offset = (num_rollouts * thread_idz + global_idx) * num_timesteps + t;\n      mp1::loadArrayParallel<COST_T::OUTPUT_DIM>(y, 0, y_d, sample_time_offset * COST_T::OUTPUT_DIM);\n    }\n    if (t < num_timesteps)\n    {  // load controls from t = 0 to t = num_timesteps - 1\n      sampling->readControlSample(global_idx, t, distribution_idx, u, theta_d, blockDim.y, thread_idy, y);\n    }\n#ifdef USE_CUDA_BARRIERS_COST\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // Compute cost\n    if (t < num_timesteps)\n    {\n      running_cost[0] +=\n          costs->computeRunningCost(y, u, t, theta_c, crash_status) +\n          sampling->computeLikelihoodRatioCost(u, theta_d, global_idx, t, distribution_idx, lambda, alpha);\n    }\n#ifdef USE_CUDA_BARRIERS_COST\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n  }\n\n  // Add all costs together\n  running_cost = &running_cost_shared[blockDim.x * blockDim.y * thread_idz];\n  __syncthreads();\n  costArrayReduction(running_cost, blockDim.x * blockDim.y, thread_idx + blockDim.x * thread_idy,\n                     blockDim.x * blockDim.y, thread_idx == blockDim.x - 1 && thread_idy == 0);\n  // point every thread to the last output at t = NUM_TIMESTEPS for terminal cost calculation\n  const int last_y_index = (num_timesteps - 1) % blockDim.x;\n  y = &y_shared[(blockDim.x * thread_idz + last_y_index) * COST_T::OUTPUT_DIM];\n#ifdef USE_COST_WITH_OFF_NUM_TIMESTEPS\n  // load last output array\n  const int t = num_timesteps - 
1;\n  mp1::loadArrayParallel<mp1::Parallel1Dir::THREAD_XY>(\n      y_shared, (blockDim.x * thread_idz + last_y_index) * COST_T::OUTPUT_DIM, y_d,\n      ((global_idx + num_rollouts * thread_idz) * num_timesteps + t) * COST_T::OUTPUT_DIM, COST_T::OUTPUT_DIM);\n  __syncthreads();\n#endif\n  // Compute terminal cost and the final cost for each thread\n  computeAndSaveCost(num_rollouts, num_timesteps, global_idx, costs, y, running_cost[0] / (num_timesteps), theta_c,\n                     trajectory_costs_d);\n}\n\ntemplate <class DYN_T, class SAMPLING_T>\n__global__ void rolloutDynamicsKernel(DYN_T* __restrict__ dynamics, SAMPLING_T* __restrict__ sampling, float dt,\n                                      const int num_timesteps, const int num_rollouts,\n                                      const float* __restrict__ init_x_d, float* __restrict__ y_d)\n{\n  // Get thread and block id\n  const int thread_idx = threadIdx.x;\n  const int thread_idy = threadIdx.y;\n  const int thread_idz = threadIdx.z;\n  const int block_idx = blockIdx.x;\n  const int global_idx = blockDim.x * block_idx + thread_idx;\n  const int shared_idx = blockDim.x * thread_idz + thread_idx;\n  const int distribution_idx = threadIdx.z;\n  const int distribution_dim = blockDim.z;\n  const int sample_dim = blockDim.x;\n  // Ensure that there is enough room for the SHARED_MEM_REQUEST_GRD_BYTES and SHARED_MEM_REQUEST_BLK_BYTES portions to\n  // be aligned to the float4 boundary.\n  const int size_of_theta_s_bytes = calcClassSharedMemSize(dynamics, blockDim);\n  const int size_of_theta_d_bytes = calcClassSharedMemSize(sampling, blockDim);\n\n  // Create shared state and control arrays\n  extern __shared__ float entire_buffer[];\n\n  float* x_shared = entire_buffer;\n  float* x_next_shared = &x_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* y_shared = &x_next_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* 
x_dot_shared = &y_shared[math::nearest_multiple_4(sample_dim * DYN_T::OUTPUT_DIM * distribution_dim)];\n  float* u_shared = &x_dot_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* theta_s_shared = &u_shared[math::nearest_multiple_4(sample_dim * DYN_T::CONTROL_DIM * distribution_dim)];\n  float* theta_d_shared = &theta_s_shared[size_of_theta_s_bytes / sizeof(float)];\n#ifdef USE_CUDA_BARRIERS_DYN\n  barrier* barrier_shared = (barrier*)&theta_d_shared[size_of_theta_d_bytes / sizeof(float)];\n#endif\n\n  // Create local state, state dot and controls\n  float* x = &x_shared[shared_idx * DYN_T::STATE_DIM];\n  float* x_next = &x_next_shared[shared_idx * DYN_T::STATE_DIM];\n  float* x_temp;\n  float* xdot = &x_dot_shared[shared_idx * DYN_T::STATE_DIM];\n  float* u = &u_shared[shared_idx * DYN_T::CONTROL_DIM];\n  float* y = &y_shared[shared_idx * DYN_T::OUTPUT_DIM];\n#ifdef USE_CUDA_BARRIERS_DYN\n  barrier* bar = &barrier_shared[shared_idx];\n  if (thread_idy == 0)\n  {\n    init(bar, blockDim.y);\n  }\n#endif\n\n  // Load global array to shared array\n  loadGlobalToShared<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM>(num_rollouts, blockDim.y, global_idx, thread_idy, thread_idz,\n                                                           init_x_d, x, xdot, u);\n  __syncthreads();\n\n  /*<----Start of simulation loop-----> */\n  dynamics->initializeDynamics(x, u, y, theta_s_shared, 0.0f, dt);\n  sampling->initializeDistributions(y, 0.0f, dt, theta_d_shared);\n  __syncthreads();\n  for (int t = 0; t < num_timesteps; t++)\n  {\n    // Load noise trajectories scaled by the exploration factor\n    sampling->readControlSample(global_idx, t, distribution_idx, u, theta_d_shared, blockDim.y, thread_idy, y);\n#ifdef USE_CUDA_BARRIERS_DYN\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // applies constraints as defined in dynamics.cuh see specific dynamics class for what happens here\n    // usually just control clamping\n 
   dynamics->enforceConstraints(x, u);\n#ifdef USE_CUDA_BARRIERS_DYN\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    // Copy control constraints back to global memory\n    sampling->writeControlSample(global_idx, t, distribution_idx, u, theta_d_shared, blockDim.y, thread_idy, y);\n\n    // Increment states\n    dynamics->step(x, x_next, xdot, u, y, theta_s_shared, t, dt);\n#ifdef USE_CUDA_BARRIERS_DYN\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    x_temp = x;\n    x = x_next;\n    x_next = x_temp;\n    // Copy state to global memory\n    int sample_time_offset = (num_rollouts * thread_idz + global_idx) * num_timesteps + t;\n    mp1::loadArrayParallel<DYN_T::OUTPUT_DIM>(y_d, sample_time_offset * DYN_T::OUTPUT_DIM, y, 0);\n  }\n}\n\ntemplate <class DYN_T, class COST_T, class SAMPLING_T>\n__global__ void visualizeKernel(DYN_T* __restrict__ dynamics, SAMPLING_T* __restrict__ sampling,\n                                COST_T* __restrict__ costs, float dt, const int num_timesteps, const int num_rollouts,\n                                const float* __restrict__ init_x_d, float lambda, float alpha, float* __restrict__ y_d,\n                                float* __restrict__ cost_traj_d, int* __restrict__ crash_status_d)\n{\n  // Get thread and block id\n  const int thread_idx = threadIdx.x;\n  const int thread_idy = threadIdx.y;\n  const int thread_idz = threadIdx.z;\n  const int block_idx = blockIdx.x;\n  const int global_idx = blockDim.x * block_idx + thread_idx;\n  const int shared_idx = blockDim.x * thread_idz + thread_idx;\n  const int distribution_idx = threadIdx.z;\n  const int distribution_dim = blockDim.z;\n  const int sample_dim = blockDim.x;\n  // Ensure that there is enough room for the SHARED_MEM_REQUEST_GRD_BYTES and SHARED_MEM_REQUEST_BLK_BYTES portions to\n  // be aligned to the float4 boundary.\n  const int size_of_theta_s_bytes = calcClassSharedMemSize(dynamics, blockDim);\n  const int 
size_of_theta_d_bytes = calcClassSharedMemSize(sampling, blockDim);\n  const int size_of_theta_c_bytes = calcClassSharedMemSize(costs, blockDim);\n\n  // Create shared state and control arrays\n  extern __shared__ float entire_buffer[];\n\n  float* x_shared = entire_buffer;\n  float* x_next_shared = &x_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* y_shared = &x_next_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* x_dot_shared = &y_shared[math::nearest_multiple_4(sample_dim * DYN_T::OUTPUT_DIM * distribution_dim)];\n  float* u_shared = &x_dot_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* theta_s_shared = &u_shared[math::nearest_multiple_4(sample_dim * DYN_T::CONTROL_DIM * distribution_dim)];\n  float* theta_d_shared = &theta_s_shared[size_of_theta_s_bytes / sizeof(float)];\n  float* theta_c_shared = &theta_d_shared[size_of_theta_d_bytes / sizeof(float)];\n  float* running_cost_shared = &theta_c_shared[size_of_theta_c_bytes / sizeof(float)];\n  int* crash_status_shared =\n      (int*)&running_cost_shared[math::nearest_multiple_4(num_timesteps * blockDim.y * blockDim.z)];\n\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  barrier* barrier_shared = (barrier*)&crash_status_shared[math::nearest_multiple_4(sample_dim * distribution_dim)];\n#endif\n\n  // Create local state, state dot and controls\n  float* x = &x_shared[shared_idx * DYN_T::STATE_DIM];\n  float* x_next = &x_next_shared[shared_idx * DYN_T::STATE_DIM];\n  float* x_temp;\n  float* xdot = &x_dot_shared[shared_idx * DYN_T::STATE_DIM];\n  float* u = &u_shared[shared_idx * DYN_T::CONTROL_DIM];\n  float* y = &y_shared[shared_idx * DYN_T::OUTPUT_DIM];\n  float* running_cost = &running_cost_shared[blockDim.x * (thread_idz * blockDim.y + thread_idy)];\n  int* crash_status = &crash_status_shared[shared_idx];\n  crash_status[0] = 0;  // We have not crashed yet as of the first trajectory.\n  
int cost_index;\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  barrier* bar = &barrier_shared[shared_idx];\n  if (thread_idy == 0)\n  {\n    init(bar, blockDim.y);\n  }\n#endif\n\n  // Load global array to shared array\n  loadGlobalToShared<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM>(num_rollouts, blockDim.y, global_idx, thread_idy, thread_idz,\n                                                           init_x_d, x, xdot, u);\n  __syncthreads();\n\n  /*<----Start of simulation loop-----> */\n  dynamics->initializeDynamics(x, u, y, theta_s_shared, 0.0f, dt);\n  sampling->initializeDistributions(y, 0.0f, dt, theta_d_shared);\n  costs->initializeCosts(y, u, theta_c_shared, 0.0f, dt);\n  __syncthreads();\n  for (int t = 0; t < num_timesteps; t++)\n  {\n    // Load noise trajectories scaled by the exploration factor\n    sampling->readVisControlSample(global_idx, t, distribution_idx, u, theta_d_shared, blockDim.y, thread_idy, y);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // applies constraints as defined in dynamics.cuh see specific dynamics class for what happens here\n    // usually just control clamping\n    dynamics->enforceConstraints(x, u);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // Increment states\n    dynamics->step(x, x_next, xdot, u, y, theta_s_shared, t, dt);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    if (t > 0)\n    {\n      float cost =\n          costs->computeRunningCost(y, u, t, theta_c_shared, crash_status) +\n          sampling->computeLikelihoodRatioCost(u, theta_d_shared, global_idx, t, distribution_idx, lambda, alpha);\n      running_cost[t - 1] = cost / (num_timesteps);\n      crash_status_d[global_idx * num_timesteps + t] = crash_status[0];\n    }\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    x_temp = x;\n    x = 
x_next;\n    x_next = x_temp;\n    // Copy state to global memory\n    int sample_time_offset = (num_rollouts * thread_idz + global_idx) * num_timesteps + t;\n    mp1::loadArrayParallel<DYN_T::OUTPUT_DIM>(y_d, sample_time_offset * DYN_T::OUTPUT_DIM, y, 0);\n  }\n\n  // Add all thread_y components of cost together\n  running_cost = &running_cost_shared[thread_idx + blockDim.x * blockDim.y * thread_idz];\n  __syncthreads();\n  costArrayReduction(running_cost, blockDim.y, thread_idy, blockDim.y, thread_idy == blockDim.y - 1, blockDim.x);\n  // Compute terminal cost for each thread\n  if (threadIdx.x == 0 && threadIdx.y == 0)\n  {\n    cost_index = (threadIdx.z * num_rollouts + global_idx) * (num_timesteps + 1) + num_timesteps;\n    cost_traj_d[cost_index] = costs->terminalCost(y, theta_c_shared) / (num_timesteps);\n  }\n  __syncthreads();\n  // Copy to global memory\n  int parallel_index, step;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_X>(parallel_index, step);\n  if (num_timesteps % 4 == 0)\n  {\n    float4* cost_traj_d4 =\n        reinterpret_cast<float4*>(&cost_traj_d[(thread_idz * num_rollouts + global_idx) * num_timesteps]);\n    float4* running_cost_shared4 =\n        reinterpret_cast<float4*>(&running_cost_shared[thread_idz * num_timesteps * blockDim.y]);\n    for (int i = parallel_index; i < num_timesteps / 4; i += step)\n    {\n      cost_traj_d4[i] = running_cost_shared4[i];\n    }\n  }\n  else if (num_timesteps % 2 == 0)\n  {\n    float2* cost_traj_d2 =\n        reinterpret_cast<float2*>(&cost_traj_d[(thread_idz * num_rollouts + global_idx) * num_timesteps]);\n    float2* running_cost_shared2 =\n        reinterpret_cast<float2*>(&running_cost_shared[thread_idz * num_timesteps * blockDim.y]);\n    for (int i = parallel_index; i < num_timesteps / 2; i += step)\n    {\n      cost_traj_d2[i] = running_cost_shared2[i];\n    }\n  }\n  else\n  {\n    for (int i = parallel_index; i < num_timesteps; i += step)\n    {\n      cost_traj_d[(thread_idz * 
num_rollouts + global_idx) * num_timesteps + i] =
          running_cost_shared[thread_idz * num_timesteps * blockDim.y + i];
    }
  }
}

/**
 * Cost-evaluation kernel for visualization: replays previously computed outputs (y_d) through the
 * cost function and writes a per-timestep cost trajectory (cost_traj_d) plus per-timestep crash
 * status (crash_status_d) for each rollout. threadIdx.x indexes timesteps within a chunk,
 * threadIdx.y parallelizes the cost computation, threadIdx.z selects the sampling distribution.
 * NOTE(review): running costs are copied out with a stride of num_timesteps per rollout while the
 * terminal cost is written with a stride of (num_timesteps + 1) — presumably cost_traj_d is
 * allocated with num_timesteps + 1 entries per rollout; confirm against the host-side allocation.
 */
template <class COST_T, class SAMPLING_T, bool COALESCE>
__global__ void visualizeCostKernel(COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling, float dt,
                                    const int num_timesteps, const int num_rollouts, const float lambda, float alpha,
                                    const float* __restrict__ y_d, float* __restrict__ cost_traj_d,
                                    int* __restrict__ crash_status_d)
{
  // Get thread and block id
  const int thread_idx = threadIdx.x;
  const int thread_idy = threadIdx.y;
  const int thread_idz = threadIdx.z;
  const int global_idx = blockIdx.x;
  const int shared_idx = blockDim.x * thread_idz + thread_idx;
  const int distribution_idx = threadIdx.z;

  const int size_of_theta_c_bytes = calcClassSharedMemSize(costs, blockDim);

  // Create shared state and control arrays
  extern __shared__ float entire_buffer[];

  // Partition the single dynamic shared-memory buffer into sub-arrays; each start offset is padded
  // to a multiple of 4 floats so the float4/float2 vectorized copies below stay aligned.
  float* y_shared = entire_buffer;
  float* u_shared = &y_shared[math::nearest_multiple_4(blockDim.x * blockDim.z * COST_T::OUTPUT_DIM)];
  float* running_cost_shared = &u_shared[math::nearest_multiple_4(blockDim.x * blockDim.z * COST_T::CONTROL_DIM)];
  int* crash_status_shared =
      (int*)&running_cost_shared[math::nearest_multiple_4(blockDim.y * blockDim.z * num_timesteps)];
  float* theta_c = (float*)&crash_status_shared[math::nearest_multiple_4(blockDim.x * blockDim.z)];
  float* theta_d = &theta_c[size_of_theta_c_bytes / sizeof(float)];
#ifdef USE_CUDA_BARRIERS_COST
  const int size_of_theta_d_bytes = calcClassSharedMemSize(sampling, blockDim);
  barrier* barrier_shared = (barrier*)&theta_d[size_of_theta_d_bytes / sizeof(float)];
#endif
  // Create local state, state dot and controls
  float* y;
  float* u;
  int* crash_status;

  // Initialize running cost and total cost
  float* running_cost;
  int sample_time_offset = 0;
  int cost_index = 0;

  // Load global array to shared array
  y = &y_shared[shared_idx * COST_T::OUTPUT_DIM];
  u = &u_shared[shared_idx * COST_T::CONTROL_DIM];
  crash_status = &crash_status_shared[shared_idx];
  crash_status[0] = 0;  // We have not crashed yet as of the first trajectory.
#ifdef USE_CUDA_BARRIERS_COST
  // One cuda::barrier per (x, z) slot, shared by the blockDim.y cost threads.
  barrier* bar = &barrier_shared[(blockDim.x * thread_idz + thread_idx)];
  if (thread_idy == 0)
  {
    init(bar, blockDim.y);
  }
#endif

  /*<----Start of simulation loop-----> */
#ifdef USE_COST_WITH_OFF_NUM_TIMESTEPS
  const int max_time_iters = ceilf((float)(num_timesteps - 2) / blockDim.x);
#else
  const int max_time_iters = ceilf((float)num_timesteps / blockDim.x);
#endif
  costs->initializeCosts(y, u, theta_c, 0.0f, dt);
  sampling->initializeDistributions(y, 0.0f, dt, theta_d);
  __syncthreads();
  for (int time_iter = 0; time_iter < max_time_iters; ++time_iter)
  {
    // Each x-thread handles one timestep per iteration of this loop.
    int t = thread_idx + time_iter * blockDim.x;
    cost_index = (thread_idz * num_rollouts + global_idx) * (num_timesteps) + t - 1;
    running_cost = &running_cost_shared[blockDim.x * (thread_idz * blockDim.y + thread_idy) + t - 1];
    if (COALESCE)
    {  // Fill entire shared mem sequentially using sequential threads_idx
      // Last chunk may be partial when blockDim.x does not divide num_timesteps.
      int amount_to_fill = (time_iter + 1) * blockDim.x > num_timesteps ? num_timesteps % blockDim.x : blockDim.x;
      mp1::loadArrayParallel<mp1::Parallel1Dir::THREAD_XY>(
          y_shared, blockDim.x * thread_idz * COST_T::OUTPUT_DIM, y_d,
          ((num_rollouts * thread_idz + global_idx) * num_timesteps + time_iter * blockDim.x) * COST_T::OUTPUT_DIM,
          COST_T::OUTPUT_DIM * amount_to_fill);
    }
    else if (t < num_timesteps)
    {  // t = num_timesteps is the terminal state for outside this for-loop
      sample_time_offset = (num_rollouts * thread_idz + global_idx) * num_timesteps + t;
      mp1::loadArrayParallel<COST_T::OUTPUT_DIM>(y, 0, y_d, sample_time_offset * COST_T::OUTPUT_DIM);
    }
    if (t < num_timesteps)
    {  // load controls from t = 0 to t = num_timesteps - 1
      sampling->readVisControlSample(global_idx, t, distribution_idx, u, theta_d, blockDim.y, thread_idy, y);
    }
#ifdef USE_CUDA_BARRIERS_COST
    bar->arrive_and_wait();
#else
    __syncthreads();
#endif

    // Compute cost
    if (t < num_timesteps)
    {
      // Running cost = state/control cost plus the likelihood-ratio cost of the sampled control,
      // normalized by the horizon length so the per-step entries sum to an average.
      float cost = costs->computeRunningCost(y, u, t, theta_c, crash_status) +
                   sampling->computeLikelihoodRatioCost(u, theta_d, global_idx, t, distribution_idx, lambda, alpha);
      running_cost[0] = cost / (num_timesteps);
      crash_status_d[global_idx * num_timesteps + t] = crash_status[0];
    }
#ifdef USE_CUDA_BARRIERS_COST
    bar->arrive_and_wait();
#else
    __syncthreads();
#endif
  }
  // consolidate y threads into single cost
  running_cost = &running_cost_shared[thread_idx + blockDim.x * blockDim.y * thread_idz];
  __syncthreads();
  costArrayReduction(running_cost, blockDim.y, thread_idy, blockDim.y, thread_idy == blockDim.y - 1, blockDim.x);
  // point every thread to the last output at t = NUM_TIMESTEPS for terminal cost calculation
  const int last_y_index = (num_timesteps - 1) % blockDim.x;
  y = &y_shared[(blockDim.x * thread_idz + last_y_index) * COST_T::OUTPUT_DIM];
#ifdef USE_COST_WITH_OFF_NUM_TIMESTEPS
  // load last output array
  const int t = num_timesteps - 1;
  mp1::loadArrayParallel<mp1::Parallel1Dir::THREAD_XY>(
      y_shared, (blockDim.x * thread_idz + last_y_index) * COST_T::OUTPUT_DIM, y_d,
      ((global_idx + num_rollouts * thread_idz) * num_timesteps + t) * COST_T::OUTPUT_DIM, COST_T::OUTPUT_DIM);
  __syncthreads();
#endif
  // Compute terminal cost for each thread
  if (threadIdx.x == 0 && threadIdx.y == 0)
  {
    cost_index = (threadIdx.z * num_rollouts + global_idx) * (num_timesteps + 1) + num_timesteps;
    cost_traj_d[cost_index] = costs->terminalCost(y, theta_c) / (num_timesteps);
  }
  __syncthreads();
  // Copy to global memory
  // Use the widest vector width (float4/float2/scalar) that evenly divides num_timesteps.
  if (num_timesteps % 4 == 0)
  {
    float4* cost_traj_d4 =
        reinterpret_cast<float4*>(&cost_traj_d[(thread_idz * num_rollouts + global_idx) * num_timesteps]);
    float4* running_cost_shared4 =
        reinterpret_cast<float4*>(&running_cost_shared[thread_idz * num_timesteps * blockDim.y]);
    for (int i = thread_idx; i < num_timesteps / 4; i += blockDim.x)
    {
      cost_traj_d4[i] = running_cost_shared4[i];
    }
  }
  else if (num_timesteps % 2 == 0)
  {
    float2* cost_traj_d2 =
        reinterpret_cast<float2*>(&cost_traj_d[(thread_idz * num_rollouts + global_idx) * num_timesteps]);
    float2* running_cost_shared2 =
        reinterpret_cast<float2*>(&running_cost_shared[thread_idz * num_timesteps * blockDim.y]);
    for (int i = thread_idx; i < num_timesteps / 2; i += blockDim.x)
    {
      cost_traj_d2[i] = running_cost_shared2[i];
    }
  }
  else
  {
    for (int i = thread_idx; i < num_timesteps; i += blockDim.x)
    {
      cost_traj_d[(thread_idz * num_rollouts + global_idx) * num_timesteps + i] =
          running_cost_shared[thread_idz * num_timesteps * blockDim.y + i];
    }
  }
}

// Applies the exponential (softmax-style) MPPI weight transform to every rollout cost in-place,
// using a grid-wide strided loop over (x, z) threads.
__global__ void normExpKernel(int num_rollouts, float* trajectory_costs_d, float lambda_inv, float baseline)
{
  int global_idx = (blockDim.x * blockIdx.x +
threadIdx.x) * blockDim.z + threadIdx.z;
  int global_step = blockDim.x * gridDim.x * blockDim.z * gridDim.z;
  // #if defined(CUDA_VERSION) && CUDA_VERSION > 11060
  //   auto block = cg::this_grid();
  //   int global_idx_b = block.thread_rank() + block.block_rank() * block.num_threads();
  //   int global_step_b = block.num_threads() * block.num_blocks();
  //   if (global_idx == 200 && threadIdx.y == 0 && threadIdx.z == 0)
  //   {
  //     printf("Global ind: %d, thread_rank: %d\n", global_idx, global_idx_b);
  //     printf("Global step: %d, thread_rank: %d\n", global_step, global_step_b);
  //   }
  // #endif
  normExpTransform(num_rollouts * blockDim.z, trajectory_costs_d, lambda_inv, baseline, global_idx, global_step);
}

// Applies the Tsallis-statistics weight transform to every rollout cost in-place (see
// TsallisTransform below for the per-element formula).
__global__ void TsallisKernel(int num_rollouts, float* trajectory_costs_d, float gamma, float r, float baseline)
{
  int global_idx = (blockDim.x * blockIdx.x + threadIdx.x) * blockDim.z + threadIdx.z;
  int global_step = blockDim.x * gridDim.x * blockDim.z * gridDim.z;
  TsallisTransform(num_rollouts * blockDim.z, trajectory_costs_d, gamma, r, baseline, global_idx, global_step);
}

/**
 * Computes the importance-sampling-weighted average of the control perturbations for one timestep
 * (block_idx). Each thread accumulates a sum_stride-sized slice of rollouts into its slot of the
 * dynamically sized shared array, then thread 0 sums all slots and writes the result to new_u_d.
 */
template <int CONTROL_DIM>
__global__ void weightedReductionKernel(const float* __restrict__ exp_costs_d, const float* __restrict__ du_d,
                                        float* __restrict__ new_u_d, const float normalizer, const int num_timesteps,
                                        const int num_rollouts, const int sum_stride)
{
  int thread_idx = threadIdx.x;  // Rollout index
  int block_idx = blockIdx.x;    // Timestep

  // Create a shared array for intermediate sums: CONTROL_DIM x NUM_THREADS
  extern __shared__ float u_intermediate[];

  float u[CONTROL_DIM];
  setInitialControlToZero(CONTROL_DIM, thread_idx, u, u_intermediate);

  __syncthreads();

  // Sum the weighted control variations at a desired stride
  strideControlWeightReduction(num_rollouts, num_timesteps, sum_stride, thread_idx, block_idx, CONTROL_DIM, exp_costs_d,
                               normalizer, du_d, u, u_intermediate);

  __syncthreads();

  // Sum all weighted control variations
  rolloutWeightReductionAndSaveControl(thread_idx, block_idx, num_rollouts, num_timesteps, CONTROL_DIM, sum_stride, u,
                                       u_intermediate, new_u_d);

  __syncthreads();
}

/**
 * Statically-sized variant of the weighted reduction kernel: the intermediate shared array is a
 * compile-time-sized static allocation, and the normalizer is read from device memory
 * (baseline_and_normalizer_d->y, as written by fullGPUcomputeWeights) so the whole weight pipeline
 * can stay on the GPU.
 */
template <int CONTROL_DIM, int NUM_ROLLOUTS, int SUM_STRIDE>
__global__ void weightedReductionKernel(float* exp_costs_d, float* du_d, float* du_new_d,
                                        float2* baseline_and_normalizer_d, int num_timesteps)
{
  int thread_idx = threadIdx.x;  // Rollout index
  int block_idx = blockIdx.x;    // Timestep

  // Create a shared array for intermediate sums: CONTROL_DIM x NUM_THREADS
  __shared__ float u_intermediate[CONTROL_DIM * ((NUM_ROLLOUTS - 1) / SUM_STRIDE + 1)];

  float u[CONTROL_DIM];
  setInitialControlToZero(CONTROL_DIM, thread_idx, u, u_intermediate);

  __syncthreads();

  // Sum the weighted control variations at a desired stride
  strideControlWeightReduction(NUM_ROLLOUTS, num_timesteps, SUM_STRIDE, thread_idx, block_idx, CONTROL_DIM, exp_costs_d,
                               baseline_and_normalizer_d->y, du_d, u, u_intermediate);

  __syncthreads();

  // Sum all weighted control variations
  rolloutWeightReductionAndSaveControl(thread_idx, block_idx, NUM_ROLLOUTS, num_timesteps, CONTROL_DIM, SUM_STRIDE, u,
                                       u_intermediate, du_new_d);

  __syncthreads();
}

/*******************************************************************************************************************
 * Rollout Kernel Helpers
 *******************************************************************************************************************/
// Copies this rollout's initial state into its shared-memory slot and zeroes the shared state
// derivative and control buffers, using the widest vector type that divides the dimension.
template <int STATE_DIM, int CONTROL_DIM>
__device__ void loadGlobalToShared(const int num_rollouts, const int blocksize_y, const int global_idx,
                         const int thread_idy, const int thread_idz, const float* __restrict__ x_device,
                                   float* __restrict__ x_thread, float* __restrict__ xdot_thread,
                                   float* __restrict__ u_thread)
{
  // Transfer to shared memory
  int i;
  if (global_idx < num_rollouts)
  {
#if true
    // Vectorized path: float4/float2 stores when the dimension allows, scalar otherwise.
    mp1::loadArrayParallel<STATE_DIM>(x_thread, 0, x_device, STATE_DIM * thread_idz);
    if (STATE_DIM % 4 == 0)
    {
      float4* xdot4_t = reinterpret_cast<float4*>(xdot_thread);
      for (i = thread_idy; i < STATE_DIM / 4; i += blocksize_y)
      {
        xdot4_t[i] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
      }
    }
    else if (STATE_DIM % 2 == 0)
    {
      float2* xdot2_t = reinterpret_cast<float2*>(xdot_thread);
      for (i = thread_idy; i < STATE_DIM / 2; i += blocksize_y)
      {
        xdot2_t[i] = make_float2(0.0f, 0.0f);
      }
    }
    else
    {
      for (i = thread_idy; i < STATE_DIM; i += blocksize_y)
      {
        xdot_thread[i] = 0.0f;
      }
    }

    if (CONTROL_DIM % 4 == 0)
    {
      float4* u4_t = reinterpret_cast<float4*>(u_thread);
      for (i = thread_idy; i < CONTROL_DIM / 4; i += blocksize_y)
      {
        u4_t[i] = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
      }
    }
    else if (CONTROL_DIM % 2 == 0)
    {
      float2* u2_t = reinterpret_cast<float2*>(u_thread);
      for (i = thread_idy; i < CONTROL_DIM / 2; i += blocksize_y)
      {
        u2_t[i] = make_float2(0.0f, 0.0f);
      }
    }
    else
    {
      for (i = thread_idy; i < CONTROL_DIM; i += blocksize_y)
      {
        u_thread[i] = 0.0f;
      }
    }
#else
    // Scalar reference implementation, kept for comparison/debugging.
    for (i = thread_idy; i < STATE_DIM; i += blocksize_y)
    {
      x_thread[i] = x_device[i + STATE_DIM * thread_idz];
      xdot_thread[i] = 0.0f;
    }
    for (i = thread_idy; i < CONTROL_DIM; i += blocksize_y)
    {
      u_thread[i] = 0.0f;
    }
#endif
  }
}

// Adds the terminal cost (averaged over the horizon) to the accumulated running cost and stores
// one total cost per rollout per distribution (threadIdx.z).
template <class COST_T>
__device__ void computeAndSaveCost(int num_rollouts, int num_timesteps, int global_idx, COST_T* costs, float* output,
                                   float running_cost, float* theta_c, float* cost_rollouts_device)
{
  // only want to save 1 cost per trajectory
  if (threadIdx.y == 0 && global_idx < num_rollouts)
  {
    cost_rollouts_device[global_idx + num_rollouts * threadIdx.z] =
        running_cost + costs->terminalCost(output, theta_c) / (num_timesteps);
  }
}

/*******************************************************************************************************************
 * NormExp Kernel Helpers
 *******************************************************************************************************************/
// Host-side baseline: the minimum cost across all rollouts.
float computeBaselineCost(float* cost_rollouts_host, int num_rollouts)
{  // TODO if we use standard containers in MPPI, should this be replaced with a min algorithm?
  int best_idx = computeBestIndex(cost_rollouts_host, num_rollouts);
  return cost_rollouts_host[best_idx];
}

// Overwrites the cost array with a one-hot weight vector selecting the lowest-cost rollout;
// returns that lowest cost.
float constructBestWeights(float* cost_rollouts_host, int num_rollouts)
{
  int best_idx = computeBestIndex(cost_rollouts_host, num_rollouts);
  float best_cost = cost_rollouts_host[best_idx];

  for (int i = 0; i < num_rollouts; i++)
  {
    if (i == best_idx)
    {
      cost_rollouts_host[i] = 1.0;
    }
    else
    {
      cost_rollouts_host[i] = 0.0;
    }
  }

  // printf("Best idx: %d, cost: %f\n", best_cost_idx, best_cost);
  return best_cost;
}

// Linear scan for the index of the minimum-cost rollout.
int computeBestIndex(float* cost_rollouts_host, int num_rollouts)
{
  float best_cost = cost_rollouts_host[0];
  int best_cost_idx = 0;
  for (int i = 1; i < num_rollouts; i++)
  {
    if (cost_rollouts_host[i] < best_cost)
    {
      best_cost = cost_rollouts_host[i];
      best_cost_idx = i;
    }
  }

  // printf("Best idx: %d, cost: %f\n", best_cost_idx, best_cost);
  return best_cost_idx;
}

// Device-side baseline: block-wide parallel min-reduction over the rollout costs.
__device__ inline float
computeBaselineCost(int num_rollouts, const float* __restrict__ trajectory_costs_d,
                                            float* __restrict__ reduction_buffer, int rollout_idx_global,
                                            int rollout_idx_step)
{
  // Copy costs to shared memory
  float min_cost = 0.0;
#if false
  // potential method to speed up copying costs
  int prev_size = min(blockDim.x, num_rollouts);
  float my_val = (rollout_idx_global < num_rollouts) ? trajectory_costs_d[rollout_idx_global] : INFINITY;
  for (int i = rollout_idx_global + rollout_idx_step; i < num_rollouts; i += rollout_idx_step)
  {
    my_val = min(trajectory_costs_d[i], my_val);
  }
  reduction_buffer[rollout_idx_global] = my_val;
  // __syncthreads();
  // if (threadIdx.x == 0)
  // {
  //   for (int i = 0; i < min(blockDim.x, num_rollouts); i++)
  //   {
  //     printf("buff %d: %f\n", i, reduction_buffer[i]);
  //   }
  //   printf("Num rollouts; %d\n", num_rollouts);
  // }
#else
  // First fold: pairwise min of the two halves of the cost array into reduction_buffer.
  int prev_size = num_rollouts / 2;
  for (int i = rollout_idx_global; i < prev_size; i += rollout_idx_step)
  {
    reduction_buffer[i] = min(trajectory_costs_d[i], trajectory_costs_d[i + prev_size]);
  }
  // NOTE(review): for odd num_rollouts this reads reduction_buffer[num_rollouts - 1], an index
  // this function never writes — presumably trajectory_costs_d[num_rollouts - 1] was intended;
  // confirm behavior for odd rollout counts.
  if (num_rollouts - 2 * prev_size == 1 && threadIdx.x == blockDim.x - 1)
  {
    reduction_buffer[prev_size - 1] = min(reduction_buffer[num_rollouts - 1], reduction_buffer[prev_size - 1]);
  }
#endif

  __syncthreads();
  // find min along the entire array
  // Tree reduction: halve the active range each pass, folding any odd leftover element into the
  // last slot via the designated catch thread.
  for (int size = prev_size / 2; size > 0; size /= 2)
  {
    for (int i = rollout_idx_global; i < size; i += rollout_idx_step)
    {
      reduction_buffer[i] = min(reduction_buffer[i], reduction_buffer[i + size]);
    }
    __syncthreads();
    if (prev_size - 2 * size == 1 && threadIdx.x == blockDim.x - 1)
    {
      reduction_buffer[size - 1] = min(reduction_buffer[size - 1], reduction_buffer[prev_size - 1]);
    }
    __syncthreads();
    prev_size = size;
  }
  min_cost = reduction_buffer[0];
  return min_cost;
}

// Exponential MPPI weight: w_i = exp(-(cost_i - baseline) / lambda), computed in-place over a
// strided range of rollouts. Usable from both host and device.
__device__ __host__ inline void normExpTransform(int num_rollouts, float* __restrict__ trajectory_costs_d,
                                                 float lambda_inv, float baseline, int global_idx, int rollout_idx_step)
{
  for (int i = global_idx; i < num_rollouts; i += rollout_idx_step)
  {
    float cost_dif = trajectory_costs_d[i] - baseline;
    trajectory_costs_d[i] = expf(-lambda_inv * cost_dif);
  }
}

// Tsallis weight transform: weights decay as (1 - cost_dif / gamma)^(1/(r-1)) and are clamped to
// zero once cost_dif >= gamma, computed in-place over a strided range of rollouts.
__device__ __host__ inline void TsallisTransform(int num_rollouts, float* __restrict__ trajectory_costs_d, float gamma,
                                                 float r, float baseline, int global_idx, int rollout_idx_step)
{
  for (int i = global_idx; i < num_rollouts; i += rollout_idx_step)
  {
    float cost_dif = trajectory_costs_d[i] - baseline;
    // trajectory_costs_d[i] = mppi::math::expr(-lambda_bar_inv * cost_dif);
    // trajectory_costs_d[i] = (cost_dif < gamma) * expf(logf(1.0 - cost_dif / gamma) / (r - 1));
    if (cost_dif < gamma)
    {
      trajectory_costs_d[i] = expf(logf(1.0 - cost_dif / gamma) / (r - 1));
    }
    else
    {
      trajectory_costs_d[i] = 0;
    }
  }
}

// Device-side normalizer: block-wide parallel sum-reduction over the (transformed) rollout costs.
__device__ inline float computeNormalizer(int num_rollouts, const float* __restrict__ trajectory_costs_d,
                                          float* __restrict__ reduction_buffer, int rollout_idx_global,
                                          int rollout_idx_step)
{
  // Copy costs to shared memory
#if false
  // potential method to speed up copying costs
  int prev_size = min(blockDim.x, num_rollouts);
  float my_val = (rollout_idx_global < num_rollouts) ?
trajectory_costs_d[rollout_idx_global] : 0;
  for (int i = rollout_idx_global + rollout_idx_step; i < num_rollouts; i += rollout_idx_step)
  {
    my_val += trajectory_costs_d[i];
  }
  reduction_buffer[rollout_idx_global] = my_val;
#else
  // First fold: pairwise sum of the two halves of the cost array into reduction_buffer.
  int prev_size = num_rollouts / 2;
  for (int i = rollout_idx_global; i < prev_size; i += rollout_idx_step)
  {
    reduction_buffer[i] = trajectory_costs_d[i] + trajectory_costs_d[i + prev_size];
  }
  // NOTE(review): for odd num_rollouts this reads reduction_buffer[num_rollouts - 1], an index
  // this function never writes — presumably trajectory_costs_d[num_rollouts - 1] was intended;
  // confirm behavior for odd rollout counts (mirrors the same pattern in computeBaselineCost).
  if (num_rollouts - 2 * prev_size == 1 && threadIdx.x == blockDim.x - 1)
  {
    reduction_buffer[prev_size - 1] += reduction_buffer[num_rollouts - 1];
  }
#endif
  __syncthreads();
  // sum the entire array
  // Tree reduction with odd-leftover handling, as in computeBaselineCost above.
  for (int size = prev_size / 2; size > 0; size /= 2)
  {
    for (int i = rollout_idx_global; i < size; i += rollout_idx_step)
    {
      reduction_buffer[i] += reduction_buffer[i + size];
    }
    __syncthreads();
    if (prev_size - 2 * size == 1 && threadIdx.x == blockDim.x - 1)
    {
      reduction_buffer[size - 1] += reduction_buffer[prev_size - 1];
    }
    __syncthreads();
    prev_size = size;
  }
  return reduction_buffer[0];
}

/**
 * Single-block kernel that performs the entire weight computation on the GPU: finds the baseline
 * (min cost), applies the exponential transform in-place, computes the normalizer (sum of
 * weights), and writes (baseline, normalizer) as a float2 to `output` (x = baseline,
 * y = normalizer) for consumption by the statically-sized weightedReductionKernel.
 */
template <int NUM_ROLLOUTS, int BLOCKSIZE_X = 1024>
__global__ void fullGPUcomputeWeights(float* __restrict__ trajectory_costs_d, float lambda_inv,
                                      float2* __restrict__ output)
{
  __shared__ float reduction_buffer[NUM_ROLLOUTS];
  // int global_idx = blockIdx.x * blockDim.x + threadIdx.x;
  // int better_global_idx = (blockIdx.x * blockDim.x + threadIdx.x) * blockDim.y + threadIdx.y;
  // int global_idx = (blockDim.x * blockIdx.x + threadIdx.x) * blockDim.z + threadIdx.z;
  // int global_step = blockDim.x * gridDim.x;
  // int better_global_step = blockDim.x * gridDim.x  * blockDim.y * gridDim.y;
  int global_idx = threadIdx.x;
  int global_step = blockDim.x;

  float baseline = computeBaselineCost(NUM_ROLLOUTS, trajectory_costs_d, reduction_buffer, global_idx, global_step);
  normExpTransform(NUM_ROLLOUTS, trajectory_costs_d, lambda_inv, baseline, global_idx, global_step);
  __syncthreads();
  float normalizer = computeNormalizer(NUM_ROLLOUTS, trajectory_costs_d, reduction_buffer, global_idx, global_step);
  __syncthreads();
  if (threadIdx.x == 0)
  {
    *output = make_float2(baseline, normalizer);
  }
}

// Host-side normalizer: sum of all rollout weights, accumulated in double for stability.
float computeNormalizer(float* cost_rollouts_host, int num_rollouts)
{
  double normalizer = 0.0;
  for (int i = 0; i < num_rollouts; ++i)
  {
    normalizer += cost_rollouts_host[i];
  }
  return normalizer;
}

// Computes the free-energy estimate, its variance, and a modified variant from the transformed
// rollout costs (weights) on the host.
void computeFreeEnergy(float& free_energy, float& free_energy_var, float& free_energy_modified,
                       float* cost_rollouts_host, int num_rollouts, float baseline, float lambda)
{
  float var = 0;
  float norm = 0;
  for (int i = 0; i < num_rollouts; i++)
  {
    norm += cost_rollouts_host[i];
    var += SQ(cost_rollouts_host[i]);
  }
  norm /= num_rollouts;
  free_energy = -lambda * logf(norm) + baseline;
  free_energy_var = lambda * (var / num_rollouts - SQ(norm));
  // TODO Figure out the point of the following lines
  float weird_term = free_energy_var / (norm * sqrtf(1.0 * num_rollouts));
  free_energy_modified = lambda * (weird_term + 0.5 * SQ(weird_term));
}

/*******************************************************************************************************************
 * Weighted Reduction Kernel Helpers
 *******************************************************************************************************************/
// Zeroes a thread's local control accumulator and its slot of the shared intermediate array,
// using the widest vector type that divides control_dim.
__device__ void setInitialControlToZero(int control_dim, int thread_idx, float* __restrict__ u,
                                        float* __restrict__ u_intermediate)
{
  if (control_dim % 4 == 0)
  {
    for (int i = 0; i < control_dim / 4; i++)
    {
      reinterpret_cast<float4*>(u)[i] = make_float4(0, 0, 0, 0);
      reinterpret_cast<float4*>(&u_intermediate[thread_idx * control_dim])[i] = make_float4(0, 0, 0, 0);
    }
}
  else if (control_dim % 2 == 0)
  {
    for (int i = 0; i < control_dim / 2; i++)
    {
      reinterpret_cast<float2*>(u)[i] = make_float2(0, 0);
      reinterpret_cast<float2*>(&u_intermediate[thread_idx * control_dim])[i] = make_float2(0, 0);
    }
  }
  else
  {
    for (int i = 0; i < control_dim; i++)
    {
      u[i] = 0;
      u_intermediate[thread_idx * control_dim + i] = 0;
    }
  }
}

// Each thread accumulates weight * du over its sum_stride-sized slice of rollouts (for the
// timestep given by block_idx) into its own slot of u_intermediate.
__device__ void strideControlWeightReduction(const int num_rollouts, const int num_timesteps, const int sum_stride,
                                             const int thread_idx, const int block_idx, const int control_dim,
                                             const float* __restrict__ exp_costs_d, const float normalizer,
                                             const float* __restrict__ du_d, float* __restrict__ u,
                                             float* __restrict__ u_intermediate)
{
  // int index = thread_idx * sum_stride + i;
  for (int i = 0; i < sum_stride; ++i)
  {  // Iterate through the size of the subsection
    if ((thread_idx * sum_stride + i) < num_rollouts)
    {                                                                        // Ensure we do not go out of bounds
      float weight = exp_costs_d[thread_idx * sum_stride + i] / normalizer;  // compute the importance sampling weight
      for (int j = 0; j < control_dim; ++j)
      {  // Iterate through the control dimensions
        // Rollout index: (thread_idx*sum_stride + i)*(num_timesteps*control_dim)
        // Current timestep: block_idx*control_dim
        u[j] = du_d[(thread_idx * sum_stride + i) * (num_timesteps * control_dim) + block_idx * control_dim + j];
        u_intermediate[thread_idx * control_dim + j] += weight * u[j];
      }
    }
  }
}

// Thread 0 sums every per-thread slot of u_intermediate into the final weighted control for this
// timestep (block_idx) and writes it to du_new_d.
__device__ void rolloutWeightReductionAndSaveControl(int thread_idx, int block_idx, int num_rollouts, int num_timesteps,
                                                     int control_dim, int sum_stride, float* u, float* u_intermediate,
                                                     float* du_new_d)
{
  if (thread_idx == 0 && block_idx < num_timesteps)
  {  // block index refers to the current timestep
    for (int i = 0; i < control_dim; ++i)
    {  // TODO replace with memset?
      u[i] = 0;
    }
    for (int i = 0; i < ((num_rollouts - 1) / sum_stride + 1); ++i)
    {  // iterate through the each subsection
      for (int j = 0; j < control_dim; ++j)
      {
        u[j] += u_intermediate[i * control_dim + j];
      }
    }
    for (int i = 0; i < control_dim; i++)
    {
      du_new_d[block_idx * control_dim + i] = u[i];
    }
  }
}

// Final unrolled stage of the tree sum for the last <= 32 active threads; sdata is volatile so
// each partial sum is re-read from shared memory between steps.
// NOTE(review): this pattern has no explicit __syncwarp between steps — presumably it relies on
// warp-synchronous execution; confirm it is safe on the targeted architectures (Volta+ has
// independent thread scheduling).
template <int BLOCKSIZE>
__device__ void warpReduceAdd(volatile float* sdata, const int tid, const int stride)
{
  if (BLOCKSIZE >= 64)
  {
    sdata[tid * stride] += sdata[(tid + 32) * stride];
  }
  if (BLOCKSIZE >= 32)
  {
    sdata[tid * stride] += sdata[(tid + 16) * stride];
  }
  if (BLOCKSIZE >= 16)
  {
    sdata[tid * stride] += sdata[(tid + 8) * stride];
  }
  if (BLOCKSIZE >= 8)
  {
    sdata[tid * stride] += sdata[(tid + 4) * stride];
  }
  if (BLOCKSIZE >= 4)
  {
    sdata[tid * stride] += sdata[(tid + 2) * stride];
  }
  if (BLOCKSIZE >= 2)
  {
    sdata[tid * stride] += sdata[(tid + 1) * stride];
  }
}

// Block-level tree sum over a strided shared-memory cost array. Handles non-power-of-2 sizes via
// the catch_condition thread folding the odd leftover element; power-of-2 sizes finish with the
// unrolled warpReduceAdd stage.
__device__ void costArrayReduction(float* running_cost, const int start_size, const int index, const int step,
                                   const bool catch_condition, const int stride)
{
  int prev_size = start_size;
  const bool block_power_of_2 = (prev_size & (prev_size - 1)) == 0;
  const int stop_condition = (block_power_of_2) ?
32 : 0;
  int size;
  int j;

  // Tree-sum down to 32 elements (power-of-2 sizes) or all the way to 0 otherwise.
  for (size = prev_size / 2; size > stop_condition; size /= 2)
  {
    for (j = index; j < size; j += step)
    {
      running_cost[j * stride] += running_cost[(j + size) * stride];
    }
    __syncthreads();
    if (prev_size - 2 * size == 1 && catch_condition)
    {
      running_cost[(size - 1) * stride] += running_cost[(prev_size - 1) * stride];
    }
    __syncthreads();
    prev_size = size;
  }
  // Power-of-2 remainder: finish with the appropriate warp-level unrolled reduction.
  switch (size * 2)
  {
    case 64:
      if (index < 32)
      {
        warpReduceAdd<64>(running_cost, index, stride);
      }
      break;
    case 32:
      if (index < 16)
      {
        warpReduceAdd<32>(running_cost, index, stride);
      }
      break;
    case 16:
      if (index < 8)
      {
        warpReduceAdd<16>(running_cost, index, stride);
      }
      break;
    case 8:
      if (index < 4)
      {
        warpReduceAdd<8>(running_cost, index, stride);
      }
      break;
    case 4:
      if (index < 2)
      {
        warpReduceAdd<4>(running_cost, index, stride);
      }
      break;
    case 2:
      if (index < 1)
      {
        warpReduceAdd<2>(running_cost, index, stride);
      }
      break;
  }
  __syncthreads();
}

/*******************************************************************************************************************
 * Launch Functions
 *******************************************************************************************************************/
// Launches the split rollout pipeline: the dynamics kernel propagates all sampled trajectories and
// writes outputs to y_d, then the cost kernel replays y_d to produce per-rollout trajectory costs.
// Exits the process on invalid block-size configuration.
template <class DYN_T, class COST_T, typename SAMPLING_T, bool COALESCE>
void launchSplitRolloutKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,
                              SAMPLING_T* __restrict__ sampling, float dt, const int num_timesteps,
                              const int num_rollouts, float lambda, float alpha, float* __restrict__ init_x_d,
                              float* __restrict__ y_d, float* __restrict__ trajectory_costs, dim3 dimDynBlock,
                              dim3 dimCostBlock, cudaStream_t stream, bool synchronize)
{
  if (num_rollouts % dimDynBlock.x != 0)
  {
    std::cerr << __FILE__ << " (" << __LINE__ << "): num_rollouts (" << num_rollouts
              << ") must be evenly divided by dynamics block size x (" << dimDynBlock.x << ")" << std::endl;
    exit(EXIT_FAILURE);
  }
  if (num_timesteps < dimCostBlock.x)
  {
    std::cerr << __FILE__ << " (" << __LINE__ << "): num_timesteps (" << num_timesteps
              << ") must be greater than or equal to cost block size x (" << dimCostBlock.x << ")" << std::endl;
    exit(EXIT_FAILURE);
  }
  // Run Dynamics
  const int gridsize_x = math::int_ceil(num_rollouts, dimDynBlock.x);
  dim3 dimGrid(gridsize_x, 1, 1);
  unsigned dynamics_shared_size = calcRolloutDynamicsKernelSharedMemSize(dynamics, sampling, dimDynBlock);
  // Raise the dynamic shared memory limit so large shared buffers (> default 48 KB) are allowed.
  HANDLE_ERROR(cudaFuncSetAttribute(rolloutDynamicsKernel<DYN_T, SAMPLING_T>,
                                    cudaFuncAttributeMaxDynamicSharedMemorySize, dynamics_shared_size));
  rolloutDynamicsKernel<DYN_T, SAMPLING_T><<<dimGrid, dimDynBlock, dynamics_shared_size, stream>>>(
      dynamics->model_d_, sampling->sampling_d_, dt, num_timesteps, num_rollouts, init_x_d, y_d);
  HANDLE_ERROR(cudaGetLastError());
  // Run Costs
  dim3 dimCostGrid(num_rollouts, 1, 1);
  unsigned cost_shared_size = calcRolloutCostKernelSharedMemSize(costs, sampling, dimCostBlock);
  // NOTE(review): unlike the dynamics launch above, no cudaFuncSetAttribute(...) raises the
  // dynamic shared memory limit for rolloutCostKernel — confirm cost_shared_size can never exceed
  // the default per-block limit.
  rolloutCostKernel<COST_T, SAMPLING_T, COALESCE><<<dimCostGrid, dimCostBlock, cost_shared_size, stream>>>(
      costs->cost_d_, sampling->sampling_d_, dt, num_timesteps, num_rollouts, lambda, alpha, y_d, trajectory_costs);
  HANDLE_ERROR(cudaGetLastError());
  if (synchronize)
  {
    HANDLE_ERROR(cudaStreamSynchronize(stream));
  }
}

// Launches the combined (dynamics + cost in one kernel) rollout. Exits the process on invalid
// block-size configuration.
template <class DYN_T, class COST_T, typename SAMPLING_T>
void launchRolloutKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling,
                         float dt, const int num_timesteps, const int num_rollouts, float lambda, float alpha,
                         float* __restrict__ init_x_d, float* __restrict__ trajectory_costs, dim3 dimBlock,
                         cudaStream_t stream, bool synchronize)
{
  if (num_rollouts % dimBlock.x != 0)
  {
    std::cerr << __FILE__ << " (" << __LINE__ << "): num_rollouts (" << num_rollouts
              << ") must be evenly divided by rollout thread block size x (" << dimBlock.x << ")" << std::endl;
    exit(EXIT_FAILURE);
  }

  const int gridsize_x = math::int_ceil(num_rollouts, dimBlock.x);
  dim3 dimGrid(gridsize_x, 1, 1);
  unsigned shared_mem_size = calcRolloutCombinedKernelSharedMemSize(dynamics, costs, sampling, dimBlock);
  HANDLE_ERROR(cudaFuncSetAttribute(rolloutKernel<DYN_T, COST_T, SAMPLING_T>,
                                    cudaFuncAttributeMaxDynamicSharedMemorySize, shared_mem_size));
  rolloutKernel<DYN_T, COST_T, SAMPLING_T><<<dimGrid, dimBlock, shared_mem_size, stream>>>(
      dynamics->model_d_, sampling->sampling_d_, costs->cost_d_, dt, num_timesteps, num_rollouts, init_x_d, lambda,
      alpha, trajectory_costs);
  HANDLE_ERROR(cudaGetLastError());
  if (synchronize)
  {
    HANDLE_ERROR(cudaStreamSynchronize(stream));
  }
}

// Launches the visualization pipeline (definition continues beyond this chunk).
template <class DYN_T, class COST_T, typename SAMPLING_T>
void launchVisualizeKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling,
                           float dt, const int num_timesteps, const int num_rollouts, float lambda, float alpha,
                           float* __restrict__ init_x_d, float* __restrict__ y_d, float* __restrict__ trajectory_costs,
                           int* __restrict__ crash_status_d, dim3 dimVisBlock, cudaStream_t stream, bool synchronize)
{
  if (num_rollouts <= 1)
  {  // Not enough samples to visualize
    std::cerr << "Not enough samples to visualize" <<
std::endl;\n    return;\n  }\n  if (num_rollouts % dimVisBlock.x != 0)\n  {\n    std::cerr << __FILE__ << \" (\" << __LINE__ << \"): num_rollouts (\" << num_rollouts\n              << \") must be evenly divided by vis block size x (\" << dimVisBlock.x << \")\" << std::endl;\n    return;\n  }\n\n  const int gridsize_x = math::int_ceil(num_rollouts, dimVisBlock.x);\n  dim3 dimGrid(gridsize_x, 1, 1);\n  unsigned shared_mem_size = calcVisualizeKernelSharedMemSize(dynamics, costs, sampling, num_timesteps, dimVisBlock);\n  HANDLE_ERROR(cudaFuncSetAttribute(visualizeKernel<DYN_T, COST_T, SAMPLING_T>,\n                                    cudaFuncAttributeMaxDynamicSharedMemorySize, shared_mem_size));\n  visualizeKernel<DYN_T, COST_T, SAMPLING_T><<<dimGrid, dimVisBlock, shared_mem_size, stream>>>(\n      dynamics->model_d_, sampling->sampling_d_, costs->cost_d_, dt, num_timesteps, num_rollouts, init_x_d, lambda,\n      alpha, y_d, trajectory_costs, crash_status_d);\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n}\n\ntemplate <class COST_T, class SAMPLING_T, bool COALESCE>\nvoid launchVisualizeCostKernel(COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling, float dt,\n                               const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                               float* __restrict__ y_d, int* __restrict__ sampled_crash_status_d,\n                               float* __restrict__ cost_traj_result, dim3 dimBlock, cudaStream_t stream,\n                               bool synchronize)\n{\n  if (num_rollouts <= 1)\n  {  // Not enough samples to visualize\n    std::cerr << \"Not enough samples to visualize\" << std::endl;\n    return;\n  }\n\n  dim3 dimCostGrid(num_rollouts, 1, 1);\n  unsigned shared_mem_size = calcVisCostKernelSharedMemSize(costs, sampling, num_timesteps, dimBlock);\n  visualizeCostKernel<COST_T, SAMPLING_T, COALESCE><<<dimCostGrid, dimBlock, 
shared_mem_size, stream>>>(\n      costs->cost_d_, sampling->sampling_d_, dt, num_timesteps, num_rollouts, lambda, alpha, y_d, cost_traj_result,\n      sampled_crash_status_d);\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n}\n\ntemplate <int CONTROL_DIM>\nvoid launchWeightedReductionKernel(const float* __restrict__ exp_costs_d, const float* __restrict__ du_d,\n                                   float* __restrict__ new_u_d, const float normalizer, const int num_timesteps,\n                                   const int num_rollouts, const int sum_stride, cudaStream_t stream, bool synchronize)\n{\n  dim3 dimBlock(math::int_ceil(num_rollouts, sum_stride), 1, 1);\n  dim3 dimGrid(num_timesteps, 1, 1);\n  unsigned shared_mem_size = math::nearest_multiple_4(CONTROL_DIM * dimBlock.x) * sizeof(float);\n  weightedReductionKernel<CONTROL_DIM><<<dimGrid, dimBlock, shared_mem_size, stream>>>(\n      exp_costs_d, du_d, new_u_d, normalizer, num_timesteps, num_rollouts, sum_stride);\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n}\n\nvoid launchNormExpKernel(int num_rollouts, int blocksize_x, float* trajectory_costs_d, float lambda_inv, float baseline,\n                         cudaStream_t stream, bool synchronize)\n{\n  dim3 dimBlock(blocksize_x, 1, 1);\n  dim3 dimGrid((num_rollouts - 1) / blocksize_x + 1, 1, 1);\n  normExpKernel<<<dimGrid, dimBlock, 0, stream>>>(num_rollouts, trajectory_costs_d, lambda_inv, baseline);\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n}\n\nvoid launchTsallisKernel(int num_rollouts, int blocksize_x, float* trajectory_costs_d, float gamma, float r,\n                         float baseline, cudaStream_t stream, bool synchronize)\n{\n  dim3 dimBlock(blocksize_x, 1, 1);\n  dim3 dimGrid((num_rollouts - 1) / blocksize_x + 1, 1, 1);\n  
TsallisKernel<<<dimGrid, dimBlock, 0, stream>>>(num_rollouts, trajectory_costs_d, gamma, r, baseline);\n  // CudaCheckError();\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n}\n\ntemplate <int NUM_ROLLOUTS>\nvoid launchWeightTransformKernel(float* __restrict__ costs_d, float2* __restrict__ baseline_and_norm_d,\n                                 const float lambda_inv, const int num_systems, cudaStream_t stream, bool synchronize)\n{\n  // Figure out max size of threads from the device properties (slows down this method a lot)\n  // int device_id = 0;\n  // cudaDeviceProp deviceProp;\n  // cudaGetDeviceProperties(&deviceProp, device_id);\n  // int blocksize_x = deviceProp.maxThreadsDim[0];\n  const int blocksize_x = 1024;\n  dim3 dimBlock(blocksize_x, 1, 1);\n  // Can't be split into multiple blocks because we want to do all the math in shared memory\n  dim3 dimGrid(1, 1, 1);\n  for (int i = 0; i < num_systems; i++)\n  {\n    fullGPUcomputeWeights<NUM_ROLLOUTS>\n        <<<dimGrid, dimBlock, 0, stream>>>(costs_d + i * NUM_ROLLOUTS, lambda_inv, baseline_and_norm_d + i);\n    HANDLE_ERROR(cudaGetLastError());\n  }\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n}\n\ntemplate <class DYN_T, int NUM_ROLLOUTS, int SUM_STRIDE>\nvoid launchweightedReductionKernel(float* exp_costs_d, float* du_d, float* du_new_d, float2* baseline_and_normalizer_d,\n                                   int num_timesteps, cudaStream_t stream, bool synchronize)\n{\n  dim3 dimBlock((NUM_ROLLOUTS - 1) / SUM_STRIDE + 1, 1, 1);\n  dim3 dimGrid(num_timesteps, 1, 1);\n  weightedReductionKernel<DYN_T::CONTROL_DIM, NUM_ROLLOUTS, SUM_STRIDE>\n      <<<dimGrid, dimBlock, 0, stream>>>(exp_costs_d, du_d, du_new_d, baseline_and_normalizer_d, num_timesteps);\n  // CudaCheckError();\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  
}\n}\n\n/*******************************************************************************************************************\n * Shared Memory Calculation Functions\n *******************************************************************************************************************/\ntemplate <class DYN_T, class SAMPLER_T>\nunsigned calcRolloutDynamicsKernelSharedMemSize(const DYN_T* dynamics, const SAMPLER_T* sampler, dim3& dimBlock)\n{\n  const int dynamics_num_shared = dimBlock.x * dimBlock.z;\n  unsigned dynamics_shared_size =\n      sizeof(float) * (3 * math::nearest_multiple_4(dynamics_num_shared * DYN_T::STATE_DIM) +\n                       math::nearest_multiple_4(dynamics_num_shared * DYN_T::OUTPUT_DIM) +\n                       math::nearest_multiple_4(dynamics_num_shared * DYN_T::CONTROL_DIM)) +\n      calcClassSharedMemSize<DYN_T>(dynamics, dimBlock) + calcClassSharedMemSize<SAMPLER_T>(sampler, dimBlock);\n#ifdef USE_CUDA_BARRIERS_DYN\n  dynamics_shared_size += math::int_multiple_const(dynamics_num_shared * sizeof(barrier), 16);\n#endif\n  return dynamics_shared_size;\n}\n\ntemplate <class COST_T, class SAMPLER_T>\nunsigned calcRolloutCostKernelSharedMemSize(const COST_T* cost, const SAMPLER_T* sampler, dim3& dimBlock)\n{\n  const int cost_num_shared = dimBlock.x * dimBlock.z;\n  unsigned cost_shared_size = sizeof(float) * (math::nearest_multiple_4(cost_num_shared * COST_T::OUTPUT_DIM) +\n                                               math::nearest_multiple_4(cost_num_shared * COST_T::CONTROL_DIM) +\n                                               math::nearest_multiple_4(cost_num_shared * dimBlock.y)) +\n                              sizeof(int) * math::nearest_multiple_4(cost_num_shared) +\n                              calcClassSharedMemSize(cost, dimBlock) +\n                              calcClassSharedMemSize<SAMPLER_T>(sampler, dimBlock);\n#ifdef USE_CUDA_BARRIERS_COST\n  cost_shared_size += math::int_multiple_const(cost_num_shared * 
sizeof(barrier), 16);\n#endif\n  return cost_shared_size;\n}\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T>\nunsigned calcRolloutCombinedKernelSharedMemSize(const DYN_T* dynamics, const COST_T* cost, const SAMPLER_T* sampler,\n                                                dim3& dimBlock)\n{\n  const int num_shared = dimBlock.x * dimBlock.z;\n  unsigned shared_mem_size = sizeof(float) * (3 * math::nearest_multiple_4(num_shared * DYN_T::STATE_DIM) +\n                                              math::nearest_multiple_4(num_shared * DYN_T::OUTPUT_DIM) +\n                                              math::nearest_multiple_4(num_shared * DYN_T::CONTROL_DIM) +\n                                              math::nearest_multiple_4(num_shared * dimBlock.y)) +\n                             sizeof(int) * math::nearest_multiple_4(num_shared) +\n                             calcClassSharedMemSize(dynamics, dimBlock) + calcClassSharedMemSize(cost, dimBlock) +\n                             calcClassSharedMemSize<SAMPLER_T>(sampler, dimBlock);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  shared_mem_size += math::int_multiple_const(num_shared * sizeof(barrier), 16);\n#endif\n  return shared_mem_size;\n}\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T>\nunsigned calcVisualizeKernelSharedMemSize(const DYN_T* dynamics, const COST_T* cost, const SAMPLER_T* sampler,\n                                          const int& num_timesteps, dim3& dimBlock)\n{\n  const int num_shared = dimBlock.x * dimBlock.z;\n  unsigned shared_mem_size = sizeof(float) * (3 * math::nearest_multiple_4(num_shared * DYN_T::STATE_DIM) +\n                                              math::nearest_multiple_4(num_shared * DYN_T::OUTPUT_DIM) +\n                                              math::nearest_multiple_4(num_shared * DYN_T::CONTROL_DIM) +\n                                              math::nearest_multiple_4(num_timesteps * dimBlock.y * dimBlock.z)) +\n                             
sizeof(int) * math::nearest_multiple_4(num_shared) +\n                             calcClassSharedMemSize(dynamics, dimBlock) + calcClassSharedMemSize(cost, dimBlock) +\n                             calcClassSharedMemSize<SAMPLER_T>(sampler, dimBlock);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  shared_mem_size += math::int_multiple_const(num_shared * sizeof(barrier), 16);\n#endif\n  return shared_mem_size;\n}\n\ntemplate <class COST_T, class SAMPLER_T>\nunsigned calcVisCostKernelSharedMemSize(const COST_T* cost, const SAMPLER_T* sampler, const int& num_timesteps,\n                                        dim3& dimBlock)\n{\n  const int shared_num = dimBlock.x * dimBlock.z;\n  unsigned shared_mem_size = sizeof(float) * (math::nearest_multiple_4(shared_num * COST_T::OUTPUT_DIM) +\n                                              math::nearest_multiple_4(shared_num * COST_T::CONTROL_DIM) +\n                                              math::nearest_multiple_4(dimBlock.z * num_timesteps * dimBlock.y)) +\n                             sizeof(int) * math::nearest_multiple_4(dimBlock.z * num_timesteps) +\n                             calcClassSharedMemSize(cost, dimBlock) +\n                             calcClassSharedMemSize<SAMPLER_T>(sampler, dimBlock);\n#ifdef USE_CUDA_BARRIERS_COST\n  shared_mem_size += math::int_multiple_const(shared_num * sizeof(barrier), 16);\n#endif\n  return shared_mem_size;\n}\n\ntemplate <class T>\n__host__ __device__ unsigned calcClassSharedMemSize(const T* class_ptr, const dim3& dimBlock)\n{\n  const int num_shared = dimBlock.x * dimBlock.z;\n  unsigned shared_size = math::int_multiple_const(class_ptr->getGrdSharedSizeBytes(), sizeof(float4)) +\n                         num_shared * math::int_multiple_const(class_ptr->getBlkSharedSizeBytes(), sizeof(float4));\n  return shared_size;\n}\n}  // namespace kernels\n}  // namespace mppi\n"
  },
  {
    "path": "include/mppi/core/mppi_common.cuh",
    "content": "//\n// Created by Manan Gandhi on 12/2/19.\n//\n\n#ifndef MPPIGENERIC_MPPI_COMMON_CUH\n#define MPPIGENERIC_MPPI_COMMON_CUH\n\n#include <mppi/utils/math_utils.h>\n\nnamespace mppi\n{\nnamespace kernels\n{\n/*******************************************************************************************************************\n * Kernel functions\n *******************************************************************************************************************/\ntemplate <class COST_T, class SAMPLING_T, bool COALESCE = true>\n__global__ void rolloutCostKernel(COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling, float dt,\n                                  const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                                  const float* __restrict__ y_d, float* __restrict__ trajectory_costs_d);\n\ntemplate <class DYN_T, class COST_T, class SAMPLING_T>\n__global__ void rolloutKernel(DYN_T* __restrict__ dynamics, SAMPLING_T* __restrict__ sampling,\n                              COST_T* __restrict__ costs, float dt, const int num_timesteps, const int num_rollouts,\n                              const float* __restrict__ init_x_d, float lambda, float alpha,\n                              float* __restrict__ trajectory_costs_d);\n\ntemplate <class DYN_T, class SAMPLING_T>\n__global__ void rolloutDynamicsKernel(DYN_T* __restrict__ dynamics, SAMPLING_T* __restrict__ sampling, float dt,\n                                      const int num_timesteps, const int num_rollouts,\n                                      const float* __restrict__ init_x_d, float* __restrict__ y_d);\n\ntemplate <class DYN_T, class COST_T, class SAMPLING_T>\n__global__ void visualizeKernel(DYN_T* __restrict__ dynamics, SAMPLING_T* __restrict__ sampling,\n                                COST_T* __restrict__ costs, float dt, const int num_timesteps, const int num_rollouts,\n                                const float* __restrict__ init_x_d, 
float lambda, float alpha, float* __restrict__ y_d,\n                                float* __restrict__ cost_traj_d, int* __restrict__ crash_status_d);\n\ntemplate <class COST_T, class SAMPLING_T, bool COALESCE = true>\n__global__ void visualizeCostKernel(COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling, float dt,\n                                    const int num_timesteps, const int num_rollouts, const float lambda, float alpha,\n                                    const float* __restrict__ y_d, float* __restrict__ cost_traj_d,\n                                    int* __restrict__ crash_status_d);\n\ntemplate <int CONTROL_DIM>\n__global__ void weightedReductionKernel(const float* __restrict__ exp_costs_d, const float* __restrict__ du_d,\n                                        float* __restrict__ new_u_d, const float normalizer, const int num_timesteps,\n                                        const int num_rollouts, const int sum_stride);\n\n// Norm Exponential Kernel\n__global__ void normExpKernel(int num_rollouts, float* trajectory_costs_d, float gamma, float baseline);\n// Tsallis Kernel\n__global__ void TsallisKernel(int num_rollouts, float* trajectory_costs_d, float gamma, float r, float baseline);\n\n/*******************************************************************************************************************\n * RolloutKernel Helpers\n *******************************************************************************************************************/\n/*\n * loadGlobalToShared\n * Copy global memory into shared memory\n *\n * Args:\n * state_dim: Number of states, defined in DYN_T\n * control_dim: Number of controls, defined in DYN_T\n * num_rollouts: Total number of rollouts\n * blocksize_y: Y dimension of each block of threads\n * global_idx: Current rollout index.\n * thread_idy: Current y index of block dimension.\n * thread_idz: Current z index of block dimension.\n * x0_device: initial condition in device memory\n * x_thread: 
state in shared memory\n * xdot_thread: state_dot in shared memory\n * u_thread: control / perturbed control in shared memory\n *\n */\ntemplate <int STATE_DIM, int CONTROL_DIM>\n__device__ void loadGlobalToShared(const int num_rollouts, const int blocksize_y, const int global_idx,\n                                   const int thread_idy, const int thread_idz, const float* __restrict__ x_device,\n                                   float* __restrict__ x_thread, float* __restrict__ xdot_thread,\n                                   float* __restrict__ u_thread);\n\n/**\n * @brief Calculate the terminal cost and add it to the trajectories' overall cost\n *\n * @tparam COST_T - Cost Function class\n * @param num_rollouts\n * @param num_timesteps\n * @param global_idx - sample trajectory index\n * @param costs - GPU version of cost function\n * @param x_thread - terminal state x\n * @param running_cost - current cost of the sample trajectory\n * @param theta_c - shared memory for the cost function\n * @param cost_rollouts_device - global memory array storing the cost of each sample\n *\n * @return\n */\ntemplate <class COST_T>\n__device__ void computeAndSaveCost(int num_rollouts, int num_timesteps, int global_idx, COST_T* costs, float* x_thread,\n                                   float running_cost, float* theta_c, float* cost_rollouts_device);\n\n/**\n * @brief conduct a warp reduction of addition s[tid * stride] += s[(tid + BLOCKSIZE) * stride]\n *\n * @tparam BLOCKSIZE - how many threads are doing the reduction\n * @param sdata - float array to do the reduction on\n * @param tid - current thread index\n * @param stride - how spaced out the summations should be\n *\n * @return\n */\ntemplate <int BLOCKSIZE>\n__device__ void warpReduceAdd(volatile float* sdata, const int tid, const int stride = 1);\n\n/**\n * @brief conduct a sum of floats in array through a GPU reduction algorithm\n *\n * @param running_cost - array of floats to be summed\n * @param start_size - number 
of items to be summed\n * @param index - GPU thread index\n * @param step - GPU step to avoid overlap of threads\n * @param catch_condition - when to stop summation\n * @param stride - how far apart the desired floats are in the array\n *\n * @return\n */\n__device__ inline void costArrayReduction(float* running_cost, const int start_size, const int index, const int step,\n                                          const bool catch_condition, const int stride = 1);\n\n// Norm Exp Kernel Helpers\n__device__ __host__ inline void normExpTransform(const int num_rollouts, float* __restrict__ trajectory_costs_d,\n                                                 const float lambda_inv, const float baseline, const int global_idx,\n                                                 const int rollout_idx_step);\n// Tsallis Kernel Helpers\n__device__ __host__ inline void TsallisTransform(const int num_rollouts, float* __restrict__ trajectory_costs_d,\n                                                 const float gamma, float r, const float baseline, const int global_idx,\n                                                 const int rollout_idx_step);\nfloat computeBaselineCost(float* cost_rollouts_host, int num_rollouts);\n\nfloat computeNormalizer(float* cost_rollouts_host, int num_rollouts);\n\nfloat constructBestWeights(float* cost_rollouts_host, int num_rollouts);\n\nint computeBestIndex(float* cost_rollouts_host, int num_rollouts);\n\n/**\n * Calculates the free energy mean and variance from the different\n * cost trajectories after normExpKernel\n * Inputs:\n *  cost_rollouts_host - sampled cost trajectories\n *  num_rollouts - the number of sampled cost trajectories\n *  lambda - the lambda term from the definition of free energy\n *  baseline - minimum cost trajectory\n * Outputs:\n *  free_energy - the free energy of the samples\n *  free_energy_var - the variance of the free energy calculation\n */\nvoid computeFreeEnergy(float& free_energy, float& free_energy_var, float& 
free_energy_modified,\n                       float* cost_rollouts_host, int num_rollouts, float baseline, float lambda = 1.0);\n\n/*******************************************************************************************************************\n * Weighted Reduction Kernel Helpers\n *******************************************************************************************************************/\n/**\n * @brief set controls to zero\n *\n * @param control_dim\n * @param thread_idx - threadIdx.x\n * @param u - memory for control array\n * @param u_intermediate - shared memory for control array\n *\n * @return\n */\n__device__ void setInitialControlToZero(int control_dim, int thread_idx, float* __restrict__ u,\n                                        float* __restrict__ u_intermediate);\n\n/**\n * @brief calculated the weighted sum of the controls\n *\n * @param num_rollouts\n * @param num_timesteps\n * @param sum_stride - how many summations to do in a single thread\n * @param thread_idx - threadIdx.x\n * @param block_idx - blockIdx.x\n * @param control_dim\n * @param exp_costs_d - global memory of the weights\n * @param normalizer - sum of all the weights to use as a normalizing term\n * @param du_d - global memory of all sampled controls\n * @param u - local memory to store a control from a single time\n * @param u_intermediate - shared memory containing the weighted sum\n *\n * @return\n */\n__device__ void strideControlWeightReduction(const int num_rollouts, const int num_timesteps, const int sum_stride,\n                                             const int thread_idx, const int block_idx, const int control_dim,\n                                             const float* __restrict__ exp_costs_d, const float normalizer,\n                                             const float* __restrict__ du_d, float* __restrict__ u,\n                                             float* __restrict__ u_intermediate);\n\n__device__ void 
rolloutWeightReductionAndSaveControl(int thread_idx, int block_idx, int num_rollouts, int num_timesteps,\n                                                     int control_dim, int sum_stride, float* u, float* u_intermediate,\n                                                     float* du_new_d);\n\n/**\n * Launch Kernel Methods\n **/\ntemplate <class DYN_T, class COST_T, typename SAMPLING_T, bool COALESCE = true>\nvoid launchSplitRolloutKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,\n                              SAMPLING_T* __restrict__ sampling, float dt, const int num_timesteps,\n                              const int num_rollouts, float lambda, float alpha, float* __restrict__ init_x_d,\n                              float* __restrict__ y_d, float* __restrict__ trajectory_costs, dim3 dimDynBlock,\n                              dim3 dimCostBlock, cudaStream_t stream, bool synchronize = true);\n\ntemplate <class DYN_T, class COST_T, typename SAMPLING_T>\nvoid launchRolloutKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling,\n                         float dt, const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                         float* __restrict__ init_x_d, float* __restrict__ trajectory_costs, dim3 dimBlock,\n                         cudaStream_t stream, bool synchronize = true);\n\ntemplate <class COST_T, class SAMPLING_T, bool COALESCE = true>\nvoid launchVisualizeCostKernel(COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling, float dt,\n                               const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                               float* __restrict__ y_d, int* __restrict__ sampled_crash_status_d,\n                               float* __restrict__ cost_traj_result, dim3 dimBlock, cudaStream_t stream,\n                               bool synchronize = true);\n\ntemplate <class DYN_T, class COST_T, typename 
SAMPLING_T>\nvoid launchVisualizeKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling,\n                           float dt, const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                           float* __restrict__ init_x_d, float* __restrict__ y_d, float* __restrict__ trajectory_costs,\n                           int* __restrict__ crash_status_d, dim3 dimVisBlock, cudaStream_t stream,\n                           bool synchronize = true);\n\ntemplate <int CONTROL_DIM>\nvoid launchWeightedReductionKernel(const float* __restrict__ exp_costs_d, const float* __restrict__ du_d,\n                                   float* __restrict__ new_u_d, const float normalizer, const int num_timesteps,\n                                   const int num_rollouts, const int sum_stride, cudaStream_t stream,\n                                   bool synchronize = true);\n\nvoid launchNormExpKernel(int num_rollouts, int blocksize_x, float* trajectory_costs_d, float lambda_inv, float baseline,\n                         cudaStream_t stream, bool synchronize = true);\n\nvoid launchTsallisKernel(int num_rollouts, int blocksize_x, float* trajectory_costs_d, float gamma, float r,\n                         float baseline, cudaStream_t stream, bool synchronize = true);\n\ntemplate <class DYN_T, int NUM_ROLLOUTS, int SUM_STRIDE>\nvoid launchWeightedReductionKernel(float* exp_costs_d, float* du_d, float* du_new_d, float normalizer,\n                                   int num_timesteps, cudaStream_t stream, bool synchronize = true);\n\n/*******************************************************************************************************************\n * Shared Memory Calculators for various kernels\n *******************************************************************************************************************/\ntemplate <class DYN_T, class SAMPLER_T>\nunsigned calcRolloutDynamicsKernelSharedMemSize(const DYN_T* 
dynamics, const SAMPLER_T* sampler, dim3& dimBlock);\n\ntemplate <class COST_T, class SAMPLER_T>\nunsigned calcRolloutCostKernelSharedMemSize(const COST_T* cost, const SAMPLER_T* sampler, dim3& dimBlock);\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T>\nunsigned calcRolloutCombinedKernelSharedMemSize(const DYN_T* dynamics, const COST_T* cost, const SAMPLER_T* sampler,\n                                                dim3& dimBlock);\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T>\nunsigned calcVisualizeKernelSharedMemSize(const DYN_T* dynamics, const COST_T* cost, const SAMPLER_T* sampler,\n                                          const int& num_timesteps, dim3& dimBlock);\n\ntemplate <class COST_T, class SAMPLER_T>\nunsigned calcVisCostKernelSharedMemSize(const COST_T* cost, const SAMPLER_T* sampler, const int& num_timesteps,\n                                        dim3& dimBlock);\n\ntemplate <class T>\n__host__ __device__ inline unsigned calcClassSharedMemSize(const T* class_ptr, const dim3& dimBlock);\n}  // namespace kernels\n}  // namespace mppi\n\n#if __CUDACC__\n#include \"mppi_common.cu\"\n#endif\n\n#endif  // MPPIGENERIC_MPPI_COMMON_CUH\n"
  },
  {
    "path": "include/mppi/core/rmppi_kernels.cu",
    "content": "#include <mppi/core/mppi_common.cuh>\n\nnamespace mp1 = mppi::p1;\n\nnamespace mppi\n{\nnamespace kernels\n{\nnamespace rmppi\n{\ntemplate <class DYN_T, class SAMPLING_T>\n__global__ void initEvalDynKernel(DYN_T* __restrict__ dynamics, SAMPLING_T* __restrict__ sampling, float dt,\n                                  const int num_timesteps, const int num_rollouts, int samples_per_condition,\n                                  const int* __restrict__ strides_d, const float* __restrict__ states_d,\n                                  float* __restrict__ y_d)\n{\n  // Get thread and block id\n  const int global_idx = blockDim.x * blockIdx.x + threadIdx.x;\n  const int distribution_idx = threadIdx.z;\n  const int candidate_idx = global_idx / samples_per_condition;\n  const int candidate_sample_idx = global_idx % samples_per_condition;\n  const int tdy = threadIdx.y;\n  const int size_of_theta_s_bytes = calcClassSharedMemSize(dynamics, blockDim);\n  const int size_of_theta_d_bytes = calcClassSharedMemSize(sampling, blockDim);\n\n  const int shared_idx = blockDim.x * threadIdx.z + threadIdx.x;\n  const int distribution_dim = blockDim.z;\n  const int sample_dim = blockDim.x;\n\n  // Create shared state and control arrays\n  extern __shared__ float entire_buffer[];\n\n  float* x_shared = entire_buffer;\n  float* x_next_shared = &x_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* y_shared = &x_next_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* x_dot_shared = &y_shared[math::nearest_multiple_4(sample_dim * DYN_T::OUTPUT_DIM * distribution_dim)];\n  float* u_shared = &x_dot_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* theta_s_shared = &u_shared[math::nearest_multiple_4(sample_dim * DYN_T::CONTROL_DIM * distribution_dim)];\n  float* theta_d_shared = &theta_s_shared[size_of_theta_s_bytes / sizeof(float)];\n#ifdef 
USE_CUDA_BARRIERS_DYN\n  barrier* barrier_shared = (barrier*)&theta_d_shared[size_of_theta_d_bytes / sizeof(float)];\n#endif\n\n  // Create local state, state dot and controls\n  float* x = &(reinterpret_cast<float*>(x_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* x_next = &(reinterpret_cast<float*>(x_next_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* x_temp;\n  float* xdot = &(reinterpret_cast<float*>(x_dot_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* u = &(reinterpret_cast<float*>(u_shared)[shared_idx * DYN_T::CONTROL_DIM]);\n  float* y = &(reinterpret_cast<float*>(y_shared)[shared_idx * DYN_T::OUTPUT_DIM]);\n\n  // Load global array to shared array\n  mppi::p1::loadArrayParallel<DYN_T::STATE_DIM>(x, 0, states_d, candidate_idx * DYN_T::STATE_DIM);\n  int stride = strides_d[candidate_idx];\n  for (int i = tdy; i < DYN_T::CONTROL_DIM; i += blockDim.y)\n  {\n    u[i] = 0;\n  }\n  for (int i = tdy; i < DYN_T::OUTPUT_DIM; i += blockDim.y)\n  {\n    y[i] = 0;\n  }\n  __syncthreads();\n\n#ifdef USE_CUDA_BARRIERS_DYN\n  barrier* bar = &barrier_shared[(shared_idx)];\n  if (tdy == 0)\n  {\n    init(bar, blockDim.y);\n  }\n#endif\n  /*<----Start of simulation loop-----> */\n  dynamics->initializeDynamics(x, u, y, theta_s_shared, 0.0, dt);\n  sampling->initializeDistributions(y, 0.0, dt, theta_d_shared);\n  __syncthreads();\n  for (int t = 0; t < num_timesteps; t++)\n  {\n    // Load noise trajectories scaled by the exploration factor\n    int candidate_t = min(t + stride, num_timesteps - 1);\n    sampling->readControlSample(candidate_sample_idx, candidate_t, distribution_idx, u, theta_d_shared, blockDim.y, tdy,\n                                y);\n#ifdef USE_CUDA_BARRIERS_DYN\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // applies constraints as defined in dynamics.cuh see specific dynamics class for what happens here\n    // usually just control clamping\n    dynamics->enforceConstraints(x, u);\n#ifdef USE_CUDA_BARRIERS_DYN\n    
bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // Increment states\n    dynamics->step(x, x_next, xdot, u, y, theta_s_shared, t, dt);\n#ifdef USE_CUDA_BARRIERS_DYN\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    x_temp = x;\n    x = x_next;\n    x_next = x_temp;\n    // Copy state to global memory\n    int sample_time_offset = (num_rollouts * threadIdx.z + global_idx) * num_timesteps + t;\n    mp1::loadArrayParallel<DYN_T::OUTPUT_DIM>(y_d, sample_time_offset * DYN_T::OUTPUT_DIM, y, 0);\n  }\n}\n\ntemplate <class COST_T, class SAMPLING_T, bool COALESCE>\n__global__ void initEvalCostKernel(COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling, float dt,\n                                   const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                                   const int samples_per_condition, const int* __restrict__ strides_d,\n                                   const float* __restrict__ y_d, float* __restrict__ trajectory_costs_d)\n{\n  // Get thread and block id\n  const int global_idx = blockIdx.x;\n  const int distribution_idx = threadIdx.z;\n  const int candidate_idx = global_idx / samples_per_condition;\n  const int candidate_sample_idx = global_idx % samples_per_condition;\n\n  const int thread_idx = threadIdx.x;\n  const int thread_idy = threadIdx.y;\n  const int thread_idz = threadIdx.z;\n  const int shared_idx = blockDim.x * threadIdx.z + threadIdx.x;\n  const int size_of_theta_d_bytes = calcClassSharedMemSize(sampling, blockDim);\n  const int size_of_theta_c_bytes = calcClassSharedMemSize(costs, blockDim);\n\n  // Create shared state and control arrays\n  extern __shared__ float entire_buffer[];\n  float* y_shared = entire_buffer;\n  float* u_shared = &y_shared[math::nearest_multiple_4(blockDim.x * blockDim.z * COST_T::OUTPUT_DIM)];\n  float* running_cost_shared = &u_shared[math::nearest_multiple_4(blockDim.x * blockDim.z * COST_T::CONTROL_DIM)];\n  int* 
crash_status_shared = (int*)&running_cost_shared[math::nearest_multiple_4(blockDim.x * blockDim.z * blockDim.y)];\n  float* theta_c = (float*)&crash_status_shared[math::nearest_multiple_4(blockDim.x * blockDim.z)];\n  float* theta_d = &theta_c[size_of_theta_c_bytes / sizeof(float)];\n#ifdef USE_CUDA_BARRIERS_COST\n  barrier* barrier_shared = (barrier*)&theta_d[size_of_theta_d_bytes / sizeof(float)];\n#endif\n\n  // Create local state, state dot and controls\n  float* y;\n  float* u;\n  int* crash_status;\n\n  // Initialize running cost and total cost\n  float* running_cost;\n  int sample_time_offset = 0;\n  int stride = strides_d[candidate_idx];\n\n  // Load global array to shared array\n  y = &y_shared[(shared_idx)*COST_T::OUTPUT_DIM];\n  u = &u_shared[(shared_idx)*COST_T::CONTROL_DIM];\n  crash_status = &crash_status_shared[shared_idx];\n  crash_status[0] = 0;  // We have not crashed yet as of the first trajectory.\n  const int running_cost_index = thread_idx + blockDim.x * (thread_idy + blockDim.y * thread_idz);\n  running_cost = &running_cost_shared[running_cost_index];\n  running_cost[0] = 0;\n#ifdef USE_CUDA_BARRIERS_COST\n  barrier* bar = &barrier_shared[(blockDim.x * thread_idz + thread_idx)];\n  if (thread_idy == 0)\n  {\n    init(bar, blockDim.y);\n  }\n#endif\n\n  /*<----Start of simulation loop-----> */\n  const int max_time_iters = ceilf((float)num_timesteps / blockDim.x);\n  costs->initializeCosts(y, u, theta_c, 0.0, dt);\n  sampling->initializeDistributions(y, 0.0, dt, theta_d);\n  __syncthreads();\n  for (int time_iter = 0; time_iter < max_time_iters; ++time_iter)\n  {\n    int t = thread_idx + time_iter * blockDim.x;\n    if (COALESCE)\n    {  // Fill entire shared mem sequentially using sequential threads_idx\n      int amount_to_fill = (time_iter + 1) * blockDim.x > num_timesteps ? 
num_timesteps % blockDim.x : blockDim.x;\n      mp1::loadArrayParallel<mp1::Parallel1Dir::THREAD_XY>(\n          y_shared, blockDim.x * thread_idz * COST_T::OUTPUT_DIM, y_d,\n          ((num_rollouts * thread_idz + global_idx) * num_timesteps + time_iter * blockDim.x) * COST_T::OUTPUT_DIM,\n          COST_T::OUTPUT_DIM * amount_to_fill);\n    }\n    else if (t < num_timesteps)\n    {  // t = num_timesteps is the terminal state for outside this for-loop\n      sample_time_offset = (num_rollouts * thread_idz + global_idx) * num_timesteps + t;\n      mp1::loadArrayParallel<COST_T::OUTPUT_DIM>(y, 0, y_d, sample_time_offset * COST_T::OUTPUT_DIM);\n    }\n    if (t < num_timesteps)\n    {  // load controls from t = 0 to t = num_timesteps - 1\n      int candidate_t = min(t + stride, num_timesteps - 1);\n      sampling->readControlSample(candidate_sample_idx, candidate_t, distribution_idx, u, theta_d, blockDim.y,\n                                  thread_idy, y);\n    }\n#ifdef USE_CUDA_BARRIERS_COST\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // Compute cost\n    if (t < num_timesteps)\n    {\n      running_cost[0] +=\n          costs->computeRunningCost(y, u, t, theta_c, crash_status) +\n          sampling->computeLikelihoodRatioCost(u, theta_d, global_idx, t, distribution_idx, lambda, alpha);\n    }\n#ifdef USE_CUDA_BARRIERS_COST\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n  }\n\n  // Add all costs together\n  running_cost = &running_cost_shared[blockDim.x * blockDim.y * thread_idz];\n  __syncthreads();\n  costArrayReduction(running_cost, blockDim.x * blockDim.y, thread_idx + blockDim.x * thread_idy,\n                     blockDim.x * blockDim.y, thread_idx == blockDim.x - 1 && thread_idy == 0);\n  // point every thread to the last output at t = NUM_TIMESTEPS for terminal cost calculation\n  const int last_y_index = (num_timesteps - 1) % blockDim.x;\n  y = &y_shared[(blockDim.x * thread_idz + last_y_index) * 
COST_T::OUTPUT_DIM];\n  // Compute terminal cost and the final cost for each thread\n  computeAndSaveCost(num_rollouts, num_timesteps, global_idx, costs, y, running_cost[0] / (num_timesteps), theta_c,\n                     trajectory_costs_d);\n}\n\ntemplate <class DYN_T, class COST_T, class SAMPLING_T>\n__global__ void initEvalKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,\n                               SAMPLING_T* __restrict__ sampling, float dt, const int num_timesteps,\n                               const int num_rollouts, int samples_per_condition, float lambda, float alpha,\n                               const int* __restrict__ strides_d, const float* __restrict__ states_d,\n                               float* __restrict__ trajectory_costs_d)\n{\n  // Get thread and block id\n  const int global_idx = blockDim.x * blockIdx.x + threadIdx.x;\n  const int distribution_idx = threadIdx.z;\n  const int candidate_idx = global_idx / samples_per_condition;\n  const int candidate_sample_idx = global_idx % samples_per_condition;\n  const int tdy = threadIdx.y;\n  const int size_of_theta_s_bytes = calcClassSharedMemSize(dynamics, blockDim);\n  const int size_of_theta_d_bytes = calcClassSharedMemSize(sampling, blockDim);\n  const int size_of_theta_c_bytes = calcClassSharedMemSize(costs, blockDim);\n\n  const int shared_idx = blockDim.x * threadIdx.z + threadIdx.x;\n  const int distribution_dim = blockDim.z;\n  const int sample_dim = blockDim.x;\n\n  // Create shared state and control arrays\n  extern __shared__ float entire_buffer[];\n\n  float* x_shared = entire_buffer;\n  float* x_next_shared = &x_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* y_shared = &x_next_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* x_dot_shared = &y_shared[math::nearest_multiple_4(sample_dim * DYN_T::OUTPUT_DIM * distribution_dim)];\n  float* u_shared = 
&x_dot_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* theta_s_shared = &u_shared[math::nearest_multiple_4(sample_dim * DYN_T::CONTROL_DIM * distribution_dim)];\n  float* theta_d_shared = &theta_s_shared[size_of_theta_s_bytes / sizeof(float)];\n  float* theta_c_shared = &theta_d_shared[size_of_theta_d_bytes / sizeof(float)];\n  float* running_cost_shared = &theta_c_shared[size_of_theta_c_bytes / sizeof(float)];\n  int* crash_status_shared = (int*)&running_cost_shared[math::nearest_multiple_4(blockDim.x * blockDim.y * blockDim.z)];\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  barrier* barrier_shared = (barrier*)&crash_status_shared[math::nearest_multiple_4(sample_dim * distribution_dim)];\n#endif\n\n  // Create local state, state dot and controls\n  int running_cost_index = threadIdx.x + blockDim.x * (tdy + blockDim.y * threadIdx.z);\n  float* x = &(reinterpret_cast<float*>(x_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* x_next = &(reinterpret_cast<float*>(x_next_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* x_temp;\n  float* xdot = &(reinterpret_cast<float*>(x_dot_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* u = &(reinterpret_cast<float*>(u_shared)[shared_idx * DYN_T::CONTROL_DIM]);\n  float* y = &(reinterpret_cast<float*>(y_shared)[shared_idx * DYN_T::OUTPUT_DIM]);\n  float* running_cost = &running_cost_shared[running_cost_index];\n  running_cost[0] = 0.0f;\n  int* crash_status = &crash_status_shared[shared_idx];\n  crash_status[0] = 0;  // We have not crashed yet as of the first trajectory.\n\n  // Load global array to shared array\n  mppi::p1::loadArrayParallel<DYN_T::STATE_DIM>(x, 0, states_d, candidate_idx * DYN_T::STATE_DIM);\n  int stride = strides_d[candidate_idx];\n  int p_index, step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(p_index, step);\n  for (int i = p_index; i < DYN_T::CONTROL_DIM; i += step)\n  {\n    u[i] = 0;\n  }\n  for (int i = p_index; i < DYN_T::OUTPUT_DIM; i += 
step)\n  {\n    y[i] = 0;\n  }\n  __syncthreads();\n\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  barrier* bar = &barrier_shared[(shared_idx)];\n  if (tdy == 0)\n  {\n    init(bar, blockDim.y);\n  }\n#endif\n  /*<----Start of simulation loop-----> */\n  dynamics->initializeDynamics(x, u, y, theta_s_shared, 0.0, dt);\n  sampling->initializeDistributions(y, 0.0, dt, theta_d_shared);\n  costs->initializeCosts(y, u, theta_c_shared, 0.0f, dt);\n  __syncthreads();\n  for (int t = 0; t < num_timesteps; t++)\n  {\n    // Load noise trajectories scaled by the exploration factor\n    int candidate_t = min(t + stride, num_timesteps - 1);\n    sampling->readControlSample(candidate_sample_idx, candidate_t, distribution_idx, u, theta_d_shared, blockDim.y, tdy,\n                                y);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // applies constraints as defined in dynamics.cuh see specific dynamics class for what happens here\n    dynamics->enforceConstraints(x, u);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // Increment states\n    dynamics->step(x, x_next, xdot, u, y, theta_s_shared, t, dt);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    running_cost[0] +=\n        costs->computeRunningCost(y, u, t, theta_c_shared, crash_status) +\n        sampling->computeLikelihoodRatioCost(u, theta_d_shared, global_idx, t, distribution_idx, lambda, alpha);\n\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    x_temp = x;\n    x = x_next;\n    x_next = x_temp;\n  }\n  // Add all costs together\n  running_cost = &running_cost_shared[threadIdx.x + blockDim.x * blockDim.y * threadIdx.z];\n  __syncthreads();\n  costArrayReduction(running_cost, blockDim.y, tdy, blockDim.y, tdy == 0, blockDim.x);\n\n  // Compute terminal cost and the final cost for each 
thread\n  computeAndSaveCost(num_rollouts, num_timesteps, global_idx, costs, y, running_cost[0] / (num_timesteps),\n                     theta_c_shared, trajectory_costs_d);\n}\n\ntemplate <class DYN_T, class FB_T, class SAMPLING_T, int NOMINAL_STATE_IDX>\n__global__ void rolloutRMPPIDynamicsKernel(DYN_T* __restrict__ dynamics, FB_T* __restrict__ fb_controller,\n                                           SAMPLING_T* __restrict__ sampling, float dt, const int num_timesteps,\n                                           const int num_rollouts, const float* __restrict__ init_x_d,\n                                           float* __restrict__ y_d)\n{\n  // Get thread and block id\n  const int thread_idx = threadIdx.x;\n  const int thread_idy = threadIdx.y;\n  const int thread_idz = threadIdx.z;\n  const int block_idx = blockIdx.x;\n  const int global_idx = blockDim.x * block_idx + thread_idx;\n  const int shared_idx = blockDim.x * thread_idz + thread_idx;\n  const int shared_nom_idx = blockDim.x * NOMINAL_STATE_IDX + thread_idx;\n  const int distribution_idx = threadIdx.z;\n  const int distribution_dim = blockDim.z;\n  const int sample_dim = blockDim.x;\n  const int size_of_theta_s_bytes = calcClassSharedMemSize(dynamics, blockDim);\n  const int size_of_theta_d_bytes = calcClassSharedMemSize(sampling, blockDim);\n  const int size_of_theta_fb_bytes = calcClassSharedMemSize(fb_controller, blockDim);\n\n  // Create shared state and control arrays\n  extern __shared__ float entire_buffer[];\n\n  float* x_shared = entire_buffer;\n  float* x_next_shared = &x_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* y_shared = &x_next_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  float* x_dot_shared = &y_shared[math::nearest_multiple_4(sample_dim * DYN_T::OUTPUT_DIM * distribution_dim)];\n  float* u_shared = &x_dot_shared[math::nearest_multiple_4(sample_dim * DYN_T::STATE_DIM * distribution_dim)];\n  
float* theta_s_shared = &u_shared[math::nearest_multiple_4(sample_dim * DYN_T::CONTROL_DIM * distribution_dim)];\n  float* theta_d_shared = &theta_s_shared[size_of_theta_s_bytes / sizeof(float)];\n  float* theta_fb = &theta_d_shared[size_of_theta_d_bytes / sizeof(float)];\n#ifdef USE_CUDA_BARRIERS_DYN\n  barrier* barrier_shared = (barrier*)&theta_fb[size_of_theta_fb_bytes / sizeof(float)];\n#endif\n\n  // Create local state, state dot and controls\n  float* x = &(reinterpret_cast<float*>(x_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* x_next = &(reinterpret_cast<float*>(x_next_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* x_nom = &(reinterpret_cast<float*>(x_shared)[shared_nom_idx * DYN_T::STATE_DIM]);\n  float* x_nom_next = &(reinterpret_cast<float*>(x_next_shared)[shared_nom_idx * DYN_T::STATE_DIM]);\n  float* x_temp;\n  float* xdot = &(reinterpret_cast<float*>(x_dot_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* u = &(reinterpret_cast<float*>(u_shared)[shared_idx * DYN_T::CONTROL_DIM]);\n  float* y = &(reinterpret_cast<float*>(y_shared)[shared_idx * DYN_T::OUTPUT_DIM]);\n#ifdef USE_CUDA_BARRIERS_DYN\n  barrier* bar = &barrier_shared[shared_idx];\n  if (thread_idy == 0)\n  {\n    init(bar, blockDim.y);\n  }\n#endif\n  // The array to hold K(x,x*)\n  float fb_control[DYN_T::CONTROL_DIM];\n  int i;\n\n  // Load global array to shared array\n  ::mppi::kernels::loadGlobalToShared<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM>(\n      num_rollouts, blockDim.y, global_idx, thread_idy, thread_idz, init_x_d, x, xdot, u);\n  __syncthreads();\n\n  /*<----Start of simulation loop-----> */\n  dynamics->initializeDynamics(x, u, y, theta_s_shared, 0.0, dt);\n  sampling->initializeDistributions(y, 0.0, dt, theta_d_shared);\n  fb_controller->initializeFeedback(x, u, theta_fb, 0.0, dt);\n  __syncthreads();\n  for (int t = 0; t < num_timesteps; t++)\n  {\n    // Load noise trajectories scaled by the exploration factor\n    sampling->readControlSample(global_idx, t, 
distribution_idx, u, theta_d_shared, blockDim.y, thread_idy, y);\n#ifdef USE_CUDA_BARRIERS_DYN\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // Now find feedback control\n    for (i = 0; i < DYN_T::CONTROL_DIM; i++)\n    {\n      fb_control[i] = 0;\n    }\n\n    // we do not apply feedback on the nominal state\n    if (thread_idz != NOMINAL_STATE_IDX)\n    {\n      fb_controller->k(x, x_nom, t, theta_fb, fb_control);\n    }\n\n    for (i = thread_idy; i < DYN_T::CONTROL_DIM; i += blockDim.y)\n    {\n      u[i] += fb_control[i];\n    }\n#ifdef USE_CUDA_BARRIERS_DYN\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // applies constraints as defined in dynamics.cuh see specific dynamics class for what happens here\n    // usually just control clamping\n    dynamics->enforceConstraints(x, u);\n#ifdef USE_CUDA_BARRIERS_DYN\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    // copy back feedback-filled controls to global memory\n    sampling->writeControlSample(global_idx, t, distribution_idx, u, theta_d_shared, blockDim.y, thread_idy, y);\n\n    // Increment states\n    dynamics->step(x, x_next, xdot, u, y, theta_s_shared, t, dt);\n#ifdef USE_CUDA_BARRIERS_DYN\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    x_temp = x;\n    x = x_next;\n    x_next = x_temp;\n    x_temp = x_nom;\n    x_nom = x_nom_next;\n    x_nom_next = x_temp;\n    // Copy state to global memory\n    int sample_time_offset = (num_rollouts * thread_idz + global_idx) * num_timesteps + t;\n    mp1::loadArrayParallel<DYN_T::OUTPUT_DIM>(y_d, sample_time_offset * DYN_T::OUTPUT_DIM, y, 0);\n  }\n}\n\ntemplate <class COST_T, class DYN_T, class SAMPLING_T, class FB_T, int NOMINAL_STATE_IDX, bool COALESCE>\n__global__ void rolloutRMPPICostKernel(COST_T* __restrict__ costs, DYN_T* __restrict__ dynamics,\n                                       FB_T* __restrict__ fb_controller, SAMPLING_T* __restrict__ sampling, float dt,\n 
                                      const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                                       float value_func_threshold, const float* __restrict__ init_x_d,\n                                       const float* __restrict__ y_d, float* __restrict__ trajectory_costs_d)\n{\n  // Get thread and block id\n  const int thread_idx = threadIdx.x;\n  const int thread_idy = threadIdx.y;\n  const int thread_idz = threadIdx.z;\n  const int global_idx = blockIdx.x;\n  const int distribution_idx = threadIdx.z;\n  const int shared_idx = blockDim.x * thread_idz + thread_idx;\n  const int num_shared = blockDim.x * blockDim.z;\n  const int size_of_theta_s_bytes = calcClassSharedMemSize(dynamics, blockDim);\n  const int size_of_theta_d_bytes = calcClassSharedMemSize(sampling, blockDim);\n  const int size_of_theta_c_bytes = calcClassSharedMemSize(costs, blockDim);\n  const int size_of_theta_fb_bytes = calcClassSharedMemSize(fb_controller, blockDim);\n\n  // Create shared state and control arrays\n  extern __shared__ float entire_buffer[];\n  float* y_shared = entire_buffer;\n  float* u_shared = &y_shared[math::nearest_multiple_4(num_shared * COST_T::OUTPUT_DIM)];\n  float* running_cost_shared = &u_shared[math::nearest_multiple_4(num_shared * COST_T::CONTROL_DIM)];\n  int* crash_status_shared = (int*)&running_cost_shared[math::nearest_multiple_4(num_shared * 2 * blockDim.y)];\n  float* theta_c = (float*)&crash_status_shared[math::nearest_multiple_4(num_shared)];\n  float* theta_d = &theta_c[size_of_theta_c_bytes / sizeof(float)];\n  float* theta_fb = &theta_d[size_of_theta_d_bytes / sizeof(float)];\n#ifdef USE_CUDA_BARRIERS_COST\n  barrier* barrier_shared = (barrier*)&theta_fb[size_of_theta_fb_bytes / sizeof(float)];\n#endif\n\n  // Initialize running cost and total cost\n  int sample_time_offset = 0;\n\n  // The array to hold K(x,x*)\n  float fb_control[COST_T::CONTROL_DIM];\n\n  // Load global array to shared array\n  float* 
y = &y_shared[shared_idx * COST_T::OUTPUT_DIM];\n  float* y_nom = &y_shared[(blockDim.x * NOMINAL_STATE_IDX + thread_idx) * COST_T::OUTPUT_DIM];\n  float* u = &u_shared[shared_idx * COST_T::CONTROL_DIM];\n  int* crash_status = &crash_status_shared[shared_idx];\n  const int cost_index = blockDim.x * (thread_idz * blockDim.y + thread_idy) + thread_idx;\n  float* running_cost = &running_cost_shared[cost_index];\n  float* running_cost_extra = &running_cost_shared[cost_index + num_shared * blockDim.y];\n  crash_status[0] = 0;  // We have not crashed yet as of the first trajectory.\n  running_cost[0] = 0;\n  running_cost_extra[0] = 0;\n#ifdef USE_CUDA_BARRIERS_COST\n  barrier* bar = &barrier_shared[(blockDim.x * thread_idz + thread_idx)];\n  if (thread_idy == 0)\n  {\n    init(bar, blockDim.y);\n  }\n#endif\n\n  float curr_cost = 0.0f;\n\n  /*<----Start of simulation loop-----> */\n  const int max_time_iters = ceilf((float)num_timesteps / blockDim.x);\n  costs->initializeCosts(y, u, theta_c, 0.0, dt);\n  sampling->initializeDistributions(y, 0.0, dt, theta_d);\n  fb_controller->initializeFeedback(y, u, theta_fb, 0.0, dt);\n  __syncthreads();\n  for (int time_iter = 0; time_iter < max_time_iters; ++time_iter)\n  {\n    int t = thread_idx + time_iter * blockDim.x;\n    if (COALESCE)\n    {  // Fill entire shared mem sequentially using sequential threads_idx\n      int amount_to_fill = (time_iter + 1) * blockDim.x > num_timesteps ? 
num_timesteps % blockDim.x : blockDim.x;\n      mp1::loadArrayParallel<mp1::Parallel1Dir::THREAD_XY>(\n          y_shared, blockDim.x * thread_idz * COST_T::OUTPUT_DIM, y_d,\n          ((num_rollouts * thread_idz + global_idx) * num_timesteps + time_iter * blockDim.x) * COST_T::OUTPUT_DIM,\n          COST_T::OUTPUT_DIM * amount_to_fill);\n    }\n    else if (t < num_timesteps)\n    {  // t = num_timesteps is the terminal state for outside this for-loop\n      sample_time_offset = (num_rollouts * thread_idz + global_idx) * num_timesteps + t;\n      mp1::loadArrayParallel<COST_T::OUTPUT_DIM>(y, 0, y_d, sample_time_offset * COST_T::OUTPUT_DIM);\n    }\n    if (t < num_timesteps)\n    {  // load controls from t = 0 to t = num_timesteps - 1\n      sampling->readControlSample(global_idx, t, distribution_idx, u, theta_d, blockDim.y, thread_idy, y);\n    }\n#ifdef USE_CUDA_BARRIERS_COST\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // Get feedback\n    float x[DYN_T::STATE_DIM];\n    float x_nom[DYN_T::STATE_DIM];\n    if (t < num_timesteps)\n    {\n      dynamics->outputToState(y, x);\n      dynamics->outputToState(y_nom, x_nom);\n      fb_controller->k(x, x_nom, t, theta_fb, fb_control);\n    }\n#ifdef USE_CUDA_BARRIERS_COST\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // Compute cost\n    if (t < num_timesteps)\n    {\n      curr_cost = costs->computeRunningCost(y, u, t, theta_c, crash_status);\n    }\n\n    if (thread_idz == NOMINAL_STATE_IDX && t < num_timesteps)\n    {\n      running_cost[0] += curr_cost;\n      running_cost_extra[0] +=\n          sampling->computeLikelihoodRatioCost(u, theta_d, global_idx, t, distribution_idx, lambda, alpha);\n    }\n\n    if (thread_idz != NOMINAL_STATE_IDX && t < num_timesteps)\n    {\n      running_cost[0] +=\n          curr_cost + sampling->computeLikelihoodRatioCost(u, theta_d, global_idx, t, distribution_idx, lambda, alpha);\n      running_cost_extra[0] +=\n          
curr_cost + sampling->computeFeedbackCost(fb_control, theta_d, t, distribution_idx, lambda, alpha);\n    }\n\n    // We need running_state_cost_nom and running_control_cost_nom for the nominal system\n    // We need running_cost_real and running_cost_feedback for the real system\n    // Nominal system needs to know running_state_cost_nom, running_control_cost_nom, and running_cost_feedback\n    // Real system needs to know running_cost_real\n\n#ifdef USE_CUDA_BARRIERS_COST\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n  }\n\n  // Add all costs together\n  running_cost = &running_cost_shared[blockDim.x * blockDim.y * thread_idz];\n  running_cost_extra = &running_cost_shared[blockDim.x * blockDim.y * (blockDim.z + thread_idz)];\n  __syncthreads();\n\n  multiCostArrayReduction(running_cost, running_cost_extra, blockDim.x * blockDim.y,\n                          thread_idx + blockDim.x * thread_idy, blockDim.x * blockDim.y,\n                          thread_idx == blockDim.x - 1 && thread_idy == 0);\n\n  // point every thread to the last output at t = NUM_TIMESTEPS for terminal cost calculation\n  const int last_y_index = (num_timesteps - 1) % blockDim.x;\n  y = &y_shared[(blockDim.x * thread_idz + last_y_index) * COST_T::OUTPUT_DIM];\n  if (thread_idx == 0 && thread_idy == 0)\n  {\n    *running_cost += costs->terminalCost(y, theta_c);\n    if (thread_idz != NOMINAL_STATE_IDX)\n    {\n      *running_cost_extra += costs->terminalCost(y, theta_c);\n    }\n    *running_cost /= ((float)num_timesteps);\n    *running_cost_extra /= ((float)num_timesteps);\n  }\n  __syncthreads();\n  if (thread_idz != NOMINAL_STATE_IDX && thread_idx == 0 && thread_idy == 0)\n  {\n    float* running_nom_cost = &running_cost_shared[blockDim.x * blockDim.y * NOMINAL_STATE_IDX];\n    const float* running_nom_cost_likelihood_ratio_cost =\n        &running_cost_shared[blockDim.x * blockDim.y * (blockDim.z + NOMINAL_STATE_IDX)];\n    *running_nom_cost =\n        0.5 * 
*running_nom_cost + 0.5 * fmaxf(fminf(*running_cost_extra, value_func_threshold), *running_nom_cost);\n    *running_nom_cost += *running_nom_cost_likelihood_ratio_cost;\n  }\n  __syncthreads();\n  if (thread_idy == 0 && thread_idx == 0)\n  {\n    trajectory_costs_d[global_idx + thread_idz * num_rollouts] = running_cost[0];\n  }\n}\n\ntemplate <class DYN_T, class COST_T, class FB_T, class SAMPLING_T, int NOMINAL_STATE_IDX>\n__global__ void rolloutRMPPIKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,\n                                   FB_T* __restrict__ fb_controller, SAMPLING_T* __restrict__ sampling, float dt,\n                                   const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                                   float value_func_threshold, const float* __restrict__ init_x_d,\n                                   float* __restrict__ trajectory_costs_d)\n{\n  // Get thread and block id\n  const int thread_idx = threadIdx.x;\n  const int thread_idy = threadIdx.y;\n  const int thread_idz = threadIdx.z;\n  const int block_idx = blockIdx.x;\n  const int global_idx = blockDim.x * block_idx + thread_idx;\n  const int shared_idx = blockDim.x * thread_idz + thread_idx;\n  const int shared_nom_idx = blockDim.x * NOMINAL_STATE_IDX + thread_idx;\n  const int distribution_idx = threadIdx.z;\n  const int num_shared = blockDim.x * blockDim.z;\n  const int size_of_theta_s_bytes = calcClassSharedMemSize(dynamics, blockDim);\n  const int size_of_theta_d_bytes = calcClassSharedMemSize(sampling, blockDim);\n  const int size_of_theta_c_bytes = calcClassSharedMemSize(costs, blockDim);\n  const int size_of_theta_fb_bytes = calcClassSharedMemSize(fb_controller, blockDim);\n\n  // Create shared state and control arrays\n  extern __shared__ float entire_buffer[];\n\n  float* x_shared = entire_buffer;\n  float* x_next_shared = &x_shared[math::nearest_multiple_4(num_shared * DYN_T::STATE_DIM)];\n  float* y_shared = 
&x_next_shared[math::nearest_multiple_4(num_shared * DYN_T::STATE_DIM)];\n  float* x_dot_shared = &y_shared[math::nearest_multiple_4(num_shared * DYN_T::OUTPUT_DIM)];\n  float* u_shared = &x_dot_shared[math::nearest_multiple_4(num_shared * DYN_T::STATE_DIM)];\n  float* theta_s_shared = &u_shared[math::nearest_multiple_4(num_shared * DYN_T::CONTROL_DIM)];\n  float* theta_d_shared = &theta_s_shared[size_of_theta_s_bytes / sizeof(float)];\n  float* theta_c_shared = &theta_d_shared[size_of_theta_d_bytes / sizeof(float)];\n  float* theta_fb = &theta_c_shared[size_of_theta_c_bytes / sizeof(float)];\n  float* running_cost_shared = &theta_fb[size_of_theta_fb_bytes / sizeof(float)];\n  int* crash_status_shared = (int*)&running_cost_shared[math::nearest_multiple_4(num_shared * 2 * blockDim.y)];\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  barrier* barrier_shared = (barrier*)&crash_status_shared[math::nearest_multiple_4(num_shared)];\n#endif\n\n  // Create local state, state dot and controls\n  float* x = &(reinterpret_cast<float*>(x_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* x_next = &(reinterpret_cast<float*>(x_next_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* x_nom = &(reinterpret_cast<float*>(x_shared)[shared_nom_idx * DYN_T::STATE_DIM]);\n  float* x_nom_next = &(reinterpret_cast<float*>(x_next_shared)[shared_nom_idx * DYN_T::STATE_DIM]);\n  float* x_temp;\n  float* xdot = &(reinterpret_cast<float*>(x_dot_shared)[shared_idx * DYN_T::STATE_DIM]);\n  float* u = &(reinterpret_cast<float*>(u_shared)[shared_idx * DYN_T::CONTROL_DIM]);\n  float* y = &(reinterpret_cast<float*>(y_shared)[shared_idx * DYN_T::OUTPUT_DIM]);\n  int* crash_status = &crash_status_shared[shared_idx];\n  const int cost_index = blockDim.x * (thread_idz * blockDim.y + thread_idy) + thread_idx;\n  float* running_cost = &running_cost_shared[cost_index];\n  float* running_cost_extra = &running_cost_shared[cost_index + num_shared * blockDim.y];\n  crash_status[0] = 0;  // We have not crashed yet as of 
the first trajectory.\n  running_cost[0] = 0;\n  running_cost_extra[0] = 0;\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  barrier* bar = &barrier_shared[shared_idx];\n  if (thread_idy == 0)\n  {\n    init(bar, blockDim.y);\n  }\n#endif\n  // The array to hold K(x,x*)\n  float fb_control[DYN_T::CONTROL_DIM];\n  int i;\n  float curr_cost = 0.0f;\n\n  // Load global array to shared array\n  ::mppi::kernels::loadGlobalToShared<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM>(\n      num_rollouts, blockDim.y, global_idx, thread_idy, thread_idz, init_x_d, x, xdot, u);\n  __syncthreads();\n\n  /*<----Start of simulation loop-----> */\n  dynamics->initializeDynamics(x, u, y, theta_s_shared, 0.0, dt);\n  sampling->initializeDistributions(y, 0.0, dt, theta_d_shared);\n  costs->initializeCosts(y, u, theta_c_shared, 0.0, dt);\n  fb_controller->initializeFeedback(x, u, theta_fb, 0.0, dt);\n  __syncthreads();\n  for (int t = 0; t < num_timesteps; t++)\n  {\n    // Load noise trajectories scaled by the exploration factor\n    sampling->readControlSample(global_idx, t, distribution_idx, u, theta_d_shared, blockDim.y, thread_idy, y);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // Now find feedback control\n    for (i = 0; i < DYN_T::CONTROL_DIM; i++)\n    {\n      fb_control[i] = 0;\n    }\n\n    // we do not apply feedback on the nominal state\n    if (thread_idz != NOMINAL_STATE_IDX)\n    {\n      fb_controller->k(x, x_nom, t, theta_fb, fb_control);\n    }\n\n    for (i = thread_idy; i < DYN_T::CONTROL_DIM; i += blockDim.y)\n    {\n      u[i] += fb_control[i];\n    }\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // applies constraints as defined in dynamics.cuh see specific dynamics class for what happens here\n    // usually just control clamping\n    dynamics->enforceConstraints(x, u);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    
__syncthreads();\n#endif\n    // copy back feedback-filled controls to global memory\n    sampling->writeControlSample(global_idx, t, distribution_idx, u, theta_d_shared, blockDim.y, thread_idy, y);\n\n    // Increment states\n    dynamics->step(x, x_next, xdot, u, y, theta_s_shared, t, dt);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n\n    // Compute cost\n    if (t < num_timesteps)\n    {\n      curr_cost = costs->computeRunningCost(y, u, t, theta_c_shared, crash_status);\n    }\n\n    if (thread_idz == NOMINAL_STATE_IDX && t < num_timesteps)\n    {\n      running_cost[0] += curr_cost;\n      running_cost_extra[0] +=\n          sampling->computeLikelihoodRatioCost(u, theta_d_shared, global_idx, t, distribution_idx, lambda, alpha);\n    }\n\n    if (thread_idz != NOMINAL_STATE_IDX && t < num_timesteps)\n    {\n      running_cost[0] += curr_cost + sampling->computeLikelihoodRatioCost(u, theta_d_shared, global_idx, t,\n                                                                          distribution_idx, lambda, alpha);\n      running_cost_extra[0] +=\n          curr_cost + sampling->computeFeedbackCost(fb_control, theta_d_shared, t, distribution_idx, lambda, alpha);\n    }\n\n    // We need running_state_cost_nom and running_control_cost_nom for the nominal system\n    // We need running_cost_real and running_cost_feedback for the real system\n    // Nominal system needs to know running_state_cost_nom, running_control_cost_nom, and running_cost_feedback\n    // Real system needs to know running_cost_real\n\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n    bar->arrive_and_wait();\n#else\n    __syncthreads();\n#endif\n    x_temp = x;\n    x = x_next;\n    x_next = x_temp;\n    x_temp = x_nom;\n    x_nom = x_nom_next;\n    x_nom_next = x_temp;\n  }\n\n  // Add all costs together\n  running_cost = &running_cost_shared[thread_idx + blockDim.x * blockDim.y * thread_idz];\n  running_cost_extra = 
&running_cost_shared[thread_idx + blockDim.x * blockDim.y * (blockDim.z + thread_idz)];\n  __syncthreads();\n  multiCostArrayReduction(running_cost, running_cost_extra, blockDim.y, thread_idy, blockDim.y, thread_idy == 0,\n                          blockDim.x);\n\n  if (thread_idy == 0)\n  {\n    *running_cost += costs->terminalCost(y, theta_c_shared);\n    if (thread_idz != NOMINAL_STATE_IDX)\n    {\n      *running_cost_extra += costs->terminalCost(y, theta_c_shared);\n    }\n    *running_cost /= ((float)num_timesteps);\n    *running_cost_extra /= ((float)num_timesteps);\n  }\n  __syncthreads();\n  if (thread_idz != NOMINAL_STATE_IDX && thread_idy == 0)\n  {\n    float* running_nom_cost = &running_cost_shared[blockDim.x * blockDim.y * NOMINAL_STATE_IDX];\n    const float* running_nom_cost_likelihood_ratio_cost =\n        &running_cost_shared[blockDim.x * blockDim.y * (blockDim.z + NOMINAL_STATE_IDX)];\n    *running_nom_cost =\n        0.5 * *running_nom_cost + 0.5 * fmaxf(fminf(*running_cost_extra, value_func_threshold), *running_nom_cost);\n    *running_nom_cost += *running_nom_cost_likelihood_ratio_cost;\n  }\n  __syncthreads();\n  if (thread_idy == 0)\n  {\n    trajectory_costs_d[global_idx + thread_idz * num_rollouts] = running_cost[0];\n  }\n}\n\ntemplate <class DYN_T, class COST_T, typename SAMPLING_T, bool COALESCE>\nvoid launchSplitInitEvalKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,\n                               SAMPLING_T* __restrict__ sampling, float dt, const int num_timesteps,\n                               const int num_rollouts, float lambda, float alpha, int samples_per_condition,\n                               int* __restrict__ strides_d, float* __restrict__ init_x_d, float* __restrict__ y_d,\n                               float* __restrict__ trajectory_costs, dim3 dimDynBlock, dim3 dimCostBlock,\n                               cudaStream_t stream, bool synchronize)\n{\n  if (num_rollouts % dimDynBlock.x != 0)\n  {\n    
std::cerr << __FILE__ << \" (\" << __LINE__ << \"): num_rollouts (\" << num_rollouts\n              << \") must be evenly divided by eval dynamics block size x (\" << dimDynBlock.x << \")\" << std::endl;\n    exit(EXIT_FAILURE);\n  }\n  if (num_timesteps < dimCostBlock.x)\n  {\n    std::cerr << __FILE__ << \" (\" << __LINE__ << \"): num_timesteps (\" << num_timesteps\n              << \") must be greater than or equal to eval cost block size x (\" << dimCostBlock.x << \")\" << std::endl;\n    exit(EXIT_FAILURE);\n  }\n\n  // Run Dynamics\n  const int gridsize_x = math::int_ceil(num_rollouts, dimDynBlock.x);\n  dim3 dimGrid(gridsize_x, 1, 1);\n  unsigned dynamics_shared_size = calcEvalDynKernelSharedMemSize(dynamics, sampling, dimDynBlock);\n  initEvalDynKernel<DYN_T, SAMPLING_T><<<dimGrid, dimDynBlock, dynamics_shared_size, stream>>>(\n      dynamics->model_d_, sampling->sampling_d_, dt, num_timesteps, num_rollouts, samples_per_condition, strides_d,\n      init_x_d, y_d);\n\n  // Run Costs\n  dim3 dimCostGrid(num_rollouts, 1, 1);\n  unsigned cost_shared_size =\n      calcEvalCostKernelSharedMemSize(costs, sampling, num_rollouts, samples_per_condition, dimCostBlock);\n\n  initEvalCostKernel<COST_T, SAMPLING_T, COALESCE><<<dimCostGrid, dimCostBlock, cost_shared_size, stream>>>(\n      costs->cost_d_, sampling->sampling_d_, dt, num_timesteps, num_rollouts, lambda, alpha, samples_per_condition,\n      strides_d, y_d, trajectory_costs);\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n}\n\ntemplate <class DYN_T, class COST_T, typename SAMPLING_T>\nvoid launchInitEvalKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling,\n                          float dt, const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                          int samples_per_condition, int* __restrict__ strides_d, float* __restrict__ init_x_d,\n                
          float* __restrict__ trajectory_costs, dim3 dimBlock, cudaStream_t stream, bool synchronize)\n{\n  if (num_rollouts % dimBlock.x != 0)\n  {\n    std::cerr << __FILE__ << \" (\" << __LINE__ << \"): num_rollouts (\" << num_rollouts\n              << \") must be evenly divided by eval kernel block size x (\" << dimBlock.x << \")\" << std::endl;\n    exit(EXIT_FAILURE);\n  }\n  // Run initEvalKernel\n  const int gridsize_x = math::int_ceil(num_rollouts, dimBlock.x);\n  dim3 dimGrid(gridsize_x, 1, 1);\n  unsigned shared_size =\n      calcEvalCombinedKernelSharedMemSize(dynamics, costs, sampling, num_rollouts, samples_per_condition, dimBlock);\n  initEvalKernel<DYN_T, COST_T, SAMPLING_T><<<dimGrid, dimBlock, shared_size, stream>>>(\n      dynamics->model_d_, costs->cost_d_, sampling->sampling_d_, dt, num_timesteps, num_rollouts, samples_per_condition,\n      lambda, alpha, strides_d, init_x_d, trajectory_costs);\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n}\n\ntemplate <class DYN_T, class COST_T, class SAMPLING_T, class FB_T, int NOMINAL_STATE_IDX, bool COALESCE>\nvoid launchSplitRMPPIRolloutKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,\n                                   SAMPLING_T* __restrict__ sampling, FB_T* __restrict__ fb_controller, float dt,\n                                   const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                                   float value_func_threshold, float* __restrict__ init_x_d, float* __restrict__ y_d,\n                                   float* __restrict__ trajectory_costs, dim3 dimDynBlock, dim3 dimCostBlock,\n                                   cudaStream_t stream, bool synchronize)\n{\n  if (num_rollouts % dimDynBlock.x != 0)\n  {\n    std::cerr << __FILE__ << \" (\" << __LINE__ << \"): num_rollouts (\" << num_rollouts\n              << \") must be evenly divided by dynamics block size x (\" << 
dimDynBlock.x << \")\" << std::endl;\n    exit(EXIT_FAILURE);\n  }\n  if (num_timesteps < dimCostBlock.x)\n  {\n    std::cerr << __FILE__ << \" (\" << __LINE__ << \"): num_timesteps (\" << num_timesteps\n              << \") must be greater than or equal to cost block size x (\" << dimCostBlock.x << \")\" << std::endl;\n    exit(EXIT_FAILURE);\n  }\n  if (dimDynBlock.z < 2 || dimDynBlock.z != dimCostBlock.z)\n  {\n    std::cerr << __FILE__ << \" (\" << __LINE__ << \"): number of dynamics systems (\" << dimDynBlock.z\n              << \") and cost systems (\" << dimCostBlock.z << \") must be equal to each other and at least 2\"\n              << std::endl;\n    exit(EXIT_FAILURE);\n  }\n  // Run Dynamics\n  const int gridsize_x = math::int_ceil(num_rollouts, dimDynBlock.x);\n  dim3 dimGrid(gridsize_x, 1, 1);\n  unsigned dynamics_shared_size = calcRMPPIDynKernelSharedMemSize(dynamics, sampling, fb_controller, dimDynBlock);\n  rolloutRMPPIDynamicsKernel<DYN_T, FB_T, SAMPLING_T, NOMINAL_STATE_IDX>\n      <<<dimGrid, dimDynBlock, dynamics_shared_size, stream>>>(dynamics->model_d_, fb_controller->feedback_d_,\n                                                               sampling->sampling_d_, dt, num_timesteps, num_rollouts,\n                                                               init_x_d, y_d);\n  HANDLE_ERROR(cudaGetLastError());\n\n  // Run Costs\n  dim3 dimCostGrid(num_rollouts, 1, 1);\n  unsigned cost_shared_size = calcRMPPICostKernelSharedMemSize(costs, sampling, fb_controller, dimCostBlock);\n  rolloutRMPPICostKernel<COST_T, DYN_T, SAMPLING_T, FB_T, NOMINAL_STATE_IDX, COALESCE>\n      <<<dimCostGrid, dimCostBlock, cost_shared_size, stream>>>(\n          costs->cost_d_, dynamics->model_d_, fb_controller->feedback_d_, sampling->sampling_d_, dt, num_timesteps,\n          num_rollouts, lambda, alpha, value_func_threshold, init_x_d, y_d, trajectory_costs);\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    
HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n  HANDLE_ERROR(cudaGetLastError());\n}\n\ntemplate <class DYN_T, class COST_T, class SAMPLING_T, class FB_T, int NOMINAL_STATE_IDX>\nvoid launchRMPPIRolloutKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,\n                              SAMPLING_T* __restrict__ sampling, FB_T* __restrict__ fb_controller, float dt,\n                              const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                              float value_func_threshold, float* __restrict__ init_x_d,\n                              float* __restrict__ trajectory_costs, dim3 dimBlock, cudaStream_t stream,\n                              bool synchronize)\n{\n  if (num_rollouts % dimBlock.x != 0)\n  {\n    std::cerr << __FILE__ << \" (\" << __LINE__ << \"): num_rollouts (\" << num_rollouts\n              << \") must be evenly divided by dynamics block size x (\" << dimBlock.x << \")\" << std::endl;\n    exit(EXIT_FAILURE);\n  }\n  // if (num_timesteps < dimCostBlock.x)\n  // {\n  //   std::cerr << __FILE__ << \" (\" << __LINE__ << \"): num_timesteps (\" << num_timesteps\n  //             << \") must be greater than or equal to cost block size x (\" << dimCostBlock.x << \")\" << std::endl;\n  //   exit(EXIT_FAILURE);\n  // }\n  if (dimBlock.z < 2)\n  {\n    std::cerr << __FILE__ << \" (\" << __LINE__ << \"): number of dynamics systems (\" << dimBlock.z\n              << \") must be at least 2\" << std::endl;\n    exit(EXIT_FAILURE);\n  }\n  // Run RMPPI Rollout kernel\n  const int gridsize_x = math::int_ceil(num_rollouts, dimBlock.x);\n  dim3 dimGrid(gridsize_x, 1, 1);\n  unsigned shared_size = calcRMPPICombinedKernelSharedMemSize(dynamics, costs, sampling, fb_controller, dimBlock);\n  rolloutRMPPIKernel<DYN_T, COST_T, FB_T, SAMPLING_T, NOMINAL_STATE_IDX><<<dimGrid, dimBlock, shared_size, stream>>>(\n      dynamics->model_d_, costs->cost_d_, fb_controller->feedback_d_, sampling->sampling_d_, 
dt, num_timesteps,\n      num_rollouts, lambda, alpha, value_func_threshold, init_x_d, trajectory_costs);\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n  HANDLE_ERROR(cudaGetLastError());\n}\n\ntemplate <class DYN_T, class SAMPLER_T>\nunsigned calcEvalDynKernelSharedMemSize(const DYN_T* dynamics, const SAMPLER_T* sampler, dim3& dimBlock)\n{\n  const int dynamics_num_shared = dimBlock.x * dimBlock.z;\n  unsigned dynamics_shared_size =\n      sizeof(float) * (3 * math::nearest_multiple_4(dynamics_num_shared * DYN_T::STATE_DIM) +\n                       math::nearest_multiple_4(dynamics_num_shared * DYN_T::OUTPUT_DIM) +\n                       math::nearest_multiple_4(dynamics_num_shared * DYN_T::CONTROL_DIM)) +\n      calcClassSharedMemSize<DYN_T>(dynamics, dimBlock) + calcClassSharedMemSize<SAMPLER_T>(sampler, dimBlock);\n#ifdef USE_CUDA_BARRIERS_DYN\n  dynamics_shared_size += math::int_multiple_const(dynamics_num_shared * sizeof(barrier), 16);\n#endif\n  return dynamics_shared_size;\n}\n\ntemplate <class COST_T, class SAMPLER_T>\nunsigned calcEvalCostKernelSharedMemSize(const COST_T* cost, const SAMPLER_T* sampler, const int num_rollouts,\n                                         const int samples_per_condition, dim3& dimBlock)\n{\n  const int cost_num_shared = dimBlock.x * dimBlock.z;\n  unsigned cost_shared_size = sizeof(float) * (math::nearest_multiple_4(cost_num_shared * COST_T::OUTPUT_DIM) +\n                                               math::nearest_multiple_4(cost_num_shared * COST_T::CONTROL_DIM) +\n                                               math::nearest_multiple_4(cost_num_shared * dimBlock.y)) +\n                              sizeof(int) * math::nearest_multiple_4(cost_num_shared) +\n                              calcClassSharedMemSize(cost, dimBlock) +\n                              calcClassSharedMemSize<SAMPLER_T>(sampler, dimBlock) +\n                              
math::nearest_multiple_4(num_rollouts / samples_per_condition) * sizeof(float);\n#ifdef USE_CUDA_BARRIERS_COST\n  cost_shared_size += math::int_multiple_const(cost_num_shared * sizeof(barrier), 16);\n#endif\n  return cost_shared_size;\n}\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T>\nunsigned calcEvalCombinedKernelSharedMemSize(const DYN_T* dynamics, const COST_T* cost, const SAMPLER_T* sampler,\n                                             const int num_rollouts, const int samples_per_condition, dim3& dimBlock)\n{\n  const int num_shared = dimBlock.x * dimBlock.z;\n  unsigned shared_size =\n      sizeof(float) * (3 * math::nearest_multiple_4(num_shared * DYN_T::STATE_DIM) +\n                       math::nearest_multiple_4(num_shared * DYN_T::OUTPUT_DIM) +\n                       math::nearest_multiple_4(num_shared * DYN_T::CONTROL_DIM) +\n                       math::nearest_multiple_4(num_shared * dimBlock.y)) +\n      sizeof(int) * math::nearest_multiple_4(num_shared) + calcClassSharedMemSize<DYN_T>(dynamics, dimBlock) +\n      calcClassSharedMemSize<COST_T>(cost, dimBlock) + calcClassSharedMemSize<SAMPLER_T>(sampler, dimBlock);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  shared_size += math::int_multiple_const(num_shared * sizeof(barrier), 16);\n#endif\n  return shared_size;\n}\n\ntemplate <class DYN_T, class SAMPLER_T, class FB_T>\nunsigned calcRMPPIDynKernelSharedMemSize(const DYN_T* dynamics, const SAMPLER_T* sampler, const FB_T* fb_controller,\n                                         dim3& dimBlock)\n{\n  const int dynamics_num_shared = dimBlock.x * dimBlock.z;\n  unsigned dynamics_shared_size =\n      sizeof(float) * (3 * math::nearest_multiple_4(dynamics_num_shared * DYN_T::STATE_DIM) +\n                       math::nearest_multiple_4(dynamics_num_shared * DYN_T::OUTPUT_DIM) +\n                       math::nearest_multiple_4(dynamics_num_shared * DYN_T::CONTROL_DIM)) +\n      calcClassSharedMemSize(dynamics, dimBlock) + 
calcClassSharedMemSize(sampler, dimBlock) +\n      calcClassSharedMemSize(fb_controller, dimBlock);\n#ifdef USE_CUDA_BARRIERS_DYN\n  dynamics_shared_size += math::int_multiple_const(dynamics_num_shared * sizeof(barrier), 16);\n#endif\n  return dynamics_shared_size;\n}\n\ntemplate <class COST_T, class SAMPLER_T, class FB_T>\nunsigned calcRMPPICostKernelSharedMemSize(const COST_T* cost, const SAMPLER_T* sampler, const FB_T* fb_controller,\n                                          dim3& dimBlock)\n{\n  const int cost_num_shared = dimBlock.x * dimBlock.z;\n  unsigned cost_shared_size = sizeof(float) * (math::nearest_multiple_4(cost_num_shared * COST_T::OUTPUT_DIM) +\n                                               math::nearest_multiple_4(cost_num_shared * COST_T::CONTROL_DIM) +\n                                               math::nearest_multiple_4(cost_num_shared * dimBlock.y * 2)) +\n                              sizeof(int) * math::nearest_multiple_4(cost_num_shared) +\n                              calcClassSharedMemSize(cost, dimBlock) + calcClassSharedMemSize(sampler, dimBlock) +\n                              calcClassSharedMemSize(fb_controller, dimBlock);\n#ifdef USE_CUDA_BARRIERS_COST\n  cost_shared_size += math::int_multiple_const(cost_num_shared * sizeof(barrier), 16);\n#endif\n  return cost_shared_size;\n}\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T, class FB_T>\nunsigned calcRMPPICombinedKernelSharedMemSize(const DYN_T* dynamics, const COST_T* cost, const SAMPLER_T* sampler,\n                                              const FB_T* fb_controller, dim3& dimBlock)\n{\n  const int num_shared = dimBlock.x * dimBlock.z;\n  unsigned shared_size = sizeof(float) * (3 * math::nearest_multiple_4(num_shared * DYN_T::STATE_DIM) +\n                                          math::nearest_multiple_4(num_shared * DYN_T::OUTPUT_DIM) +\n                                          math::nearest_multiple_4(num_shared * DYN_T::CONTROL_DIM) +\n                     
                     math::nearest_multiple_4(num_shared * dimBlock.y * 2)) +\n                         sizeof(int) * math::nearest_multiple_4(num_shared) +\n                         calcClassSharedMemSize(dynamics, dimBlock) + calcClassSharedMemSize(sampler, dimBlock) +\n                         calcClassSharedMemSize(fb_controller, dimBlock) + calcClassSharedMemSize(cost, dimBlock);\n#ifdef USE_CUDA_BARRIERS_ROLLOUT\n  shared_size += math::int_multiple_const(num_shared * sizeof(barrier), 16);\n#endif\n  return shared_size;\n}\n\n__device__ void multiCostArrayReduction(float* running_cost, float* running_cost_extra, const int start_size,\n                                        const int index, const int step, const bool catch_condition, const int stride)\n{\n  int prev_size = start_size;\n  const bool block_power_of_2 = (prev_size & (prev_size - 1)) == 0;\n  const int stop_condition = (block_power_of_2) ? 32 : 0;\n  int size;\n  int j;\n\n  for (size = prev_size / 2; size > stop_condition; size /= 2)\n  {\n    for (j = index; j < size; j += step)\n    {\n      running_cost[j * stride] += running_cost[(j + size) * stride];\n      running_cost_extra[j * stride] += running_cost_extra[(j + size) * stride];\n    }\n    __syncthreads();\n    if (prev_size - 2 * size == 1 && catch_condition)\n    {\n      running_cost[(size - 1) * stride] += running_cost[(prev_size - 1) * stride];\n      running_cost_extra[(size - 1) * stride] += running_cost_extra[(prev_size - 1) * stride];\n    }\n    __syncthreads();\n    prev_size = size;\n  }\n  switch (size * 2)\n  {\n    case 64:\n      if (index < 32)\n      {\n        warpReduceAdd<64>(running_cost, index, stride);\n        warpReduceAdd<64>(running_cost_extra, index, stride);\n      }\n      break;\n    case 32:\n      if (index < 16)\n      {\n        warpReduceAdd<32>(running_cost, index, stride);\n        warpReduceAdd<32>(running_cost_extra, index, stride);\n      }\n      break;\n    case 16:\n      if (index < 8)\n      {\n  
      warpReduceAdd<16>(running_cost, index, stride);\n        warpReduceAdd<16>(running_cost_extra, index, stride);\n      }\n      break;\n    case 8:\n      if (index < 4)\n      {\n        warpReduceAdd<8>(running_cost, index, stride);\n        warpReduceAdd<8>(running_cost_extra, index, stride);\n      }\n      break;\n    case 4:\n      if (index < 2)\n      {\n        warpReduceAdd<4>(running_cost, index, stride);\n        warpReduceAdd<4>(running_cost_extra, index, stride);\n      }\n      break;\n    case 2:\n      if (index < 1)\n      {\n        warpReduceAdd<2>(running_cost, index, stride);\n        warpReduceAdd<2>(running_cost_extra, index, stride);\n      }\n      break;\n  }\n  __syncthreads();\n}\n}  // namespace rmppi\n}  // namespace kernels\n}  // namespace mppi\n"
  },
  {
    "path": "include/mppi/core/rmppi_kernels.cuh",
    "content": "/**\n * Created by Bogdan Vlahov on 3/25/2023\n **/\n#pragma once\n#include <mppi/utils/math_utils.h>\n#include <mppi/dynamics/dynamics.cuh>\n#include <mppi/cost_functions/cost.cuh>\n\nnamespace mppi\n{\nnamespace kernels\n{\nnamespace rmppi\n{\n/**\n * Kernels Methods\n **/\ntemplate <class DYN_T, class SAMPLING_T>\n__global__ void initEvalDynKernel(DYN_T* __restrict__ dynamics, SAMPLING_T* __restrict__ sampling, float dt,\n                                  const int num_timesteps, const int num_rollouts, int samples_per_condition,\n                                  const int* __restrict__ strides_d, const float* __restrict__ states_d,\n                                  float* __restrict__ y_d);\n\ntemplate <class COST_T, class SAMPLING_T, bool COALESCE = false>\n__global__ void initEvalCostKernel(COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling, float dt,\n                                   const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                                   const int samples_per_condition, const int* __restrict__ strides_d,\n                                   const float* __restrict__ y_d, float* __restrict__ trajectory_costs_d);\n\ntemplate <class DYN_T, class FB_T, class SAMPLING_T, int NOMINAL_STATE_IDX = 0>\n__global__ void rolloutRMPPIDynamicsKernel(DYN_T* __restrict__ dynamics, FB_T* __restrict__ fb_controller,\n                                           SAMPLING_T* __restrict__ sampling, float dt, const int num_timesteps,\n                                           const int num_rollouts, const float* __restrict__ init_x_d,\n                                           float* __restrict__ y_d);\n\ntemplate <class COST_T, class DYN_T, class SAMPLING_T, class FB_T, int NOMINAL_STATE_IDX = 0, bool COALESCE = false>\n__global__ void rolloutRMPPICostKernel(COST_T* __restrict__ costs, DYN_T* __restrict__ dynamics,\n                                       FB_T* __restrict__ fb_controller, 
SAMPLING_T* __restrict__ sampling, float dt,\n                                       const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                                       float value_func_threshold, const float* __restrict__ init_x_d,\n                                       const float* __restrict__ y_d, float* __restrict__ trajectory_costs_d);\n\ntemplate <class DYN_T, class COST_T, class SAMPLING_T>\n__global__ void initEvalKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,\n                               SAMPLING_T* __restrict__ sampling, float dt, const int num_timesteps,\n                               const int num_rollouts, int samples_per_condition, float lambda, float alpha,\n                               const int* __restrict__ strides_d, const float* __restrict__ states_d,\n                               float* __restrict__ trajectory_costs_d);\n\ntemplate <class DYN_T, class COST_T, class FB_T, class SAMPLING_T, int NOMINAL_STATE_IDX = 0>\n__global__ void rolloutRMPPIKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,\n                                   FB_T* __restrict__ fb_controller, SAMPLING_T* __restrict__ sampling, float dt,\n                                   const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                                   float value_func_threshold, const float* __restrict__ init_x_d,\n                                   float* __restrict__ trajectory_costs_d);\n/**\n * Launch Methods\n **/\ntemplate <class DYN_T, class COST_T, typename SAMPLING_T, bool COALESCE = false>\nvoid launchSplitInitEvalKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,\n                               SAMPLING_T* __restrict__ sampling, float dt, const int num_timesteps,\n                               const int num_rollouts, float lambda, float alpha, int samples_per_condition,\n                               int* __restrict__ strides_d, float* 
__restrict__ init_x_d, float* __restrict__ y_d,\n                               float* __restrict__ trajectory_costs, dim3 dimDynBlock, dim3 dimCostBlock,\n                               cudaStream_t stream, bool synchronize = true);\n\ntemplate <class DYN_T, class COST_T, class SAMPLING_T, class FB_T, int NOMINAL_STATE_IDX = 0, bool COALESCE = false>\nvoid launchSplitRMPPIRolloutKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,\n                                   SAMPLING_T* __restrict__ sampling, FB_T* __restrict__ fb_controller, float dt,\n                                   const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                                   float value_func_threshold, float* __restrict__ init_x_d, float* __restrict__ y_d,\n                                   float* __restrict__ trajectory_costs, dim3 dimDynBlock, dim3 dimCostBlock,\n                                   cudaStream_t stream, bool synchronize = true);\n\ntemplate <class DYN_T, class COST_T, typename SAMPLING_T>\nvoid launchInitEvalKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs, SAMPLING_T* __restrict__ sampling,\n                          float dt, const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                          int samples_per_condition, int* __restrict__ strides_d, float* __restrict__ init_x_d,\n                          float* __restrict__ trajectory_costs, dim3 dimBlock, cudaStream_t stream, bool synchronize = true);\n\ntemplate <class DYN_T, class COST_T, class SAMPLING_T, class FB_T, int NOMINAL_STATE_IDX = 0>\nvoid launchRMPPIRolloutKernel(DYN_T* __restrict__ dynamics, COST_T* __restrict__ costs,\n                              SAMPLING_T* __restrict__ sampling, FB_T* __restrict__ fb_controller, float dt,\n                              const int num_timesteps, const int num_rollouts, float lambda, float alpha,\n                              float value_func_threshold, float* 
__restrict__ init_x_d,\n                              float* __restrict__ trajectory_costs, dim3 dimBlock, cudaStream_t stream,\n                              bool synchronize = true);\n\n/**\n * Device-only Kernel Helper Methods\n **/\n__device__ void multiCostArrayReduction(float* running_cost, float* running_cost_extra, const int start_size,\n                                        const int index, const int step, const bool catch_condition,\n                                        const int stride = 1);\n\n/**\n * Shared Memory Calculators for various kernels\n */\ntemplate <class DYN_T, class SAMPLER_T>\nunsigned calcEvalDynKernelSharedMemSize(const DYN_T* dynamics, const SAMPLER_T* sampler, dim3& dimBlock);\n\ntemplate <class COST_T, class SAMPLER_T>\nunsigned calcEvalCostKernelSharedMemSize(const COST_T* cost, const SAMPLER_T* sampler, const int num_rollouts,\n                                         const int samples_per_condition, dim3& dimBlock);\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T>\nunsigned calcEvalCombinedKernelSharedMemSize(const DYN_T* dynamics, const COST_T* cost, const SAMPLER_T* sampler,\n                                             const int num_rollouts, const int samples_per_condition, dim3& dimBlock);\n\ntemplate <class DYN_T, class SAMPLER_T, class FB_T>\nunsigned calcRMPPIDynKernelSharedMemSize(const DYN_T* dynamics, const SAMPLER_T* sampler, const FB_T* fb_controller,\n                                         dim3& dimBlock);\n\ntemplate <class COST_T, class SAMPLER_T, class FB_T>\nunsigned calcRMPPICostKernelSharedMemSize(const COST_T* cost, const SAMPLER_T* sampler, const FB_T* fb_controller,\n                                          dim3& dimBlock);\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T, class FB_T>\nunsigned calcRMPPICombinedKernelSharedMemSize(const DYN_T* dynamics, const COST_T* cost, const SAMPLER_T* sampler,\n                                              const FB_T* fb_controller, dim3& 
dimBlock);\n}  // namespace rmppi\n}  // namespace kernels\n}  // namespace mppi\n\n#if __CUDACC__\n#include \"rmppi_kernels.cu\"\n#endif\n"
  },
  {
    "path": "include/mppi/cost_functions/autorally/ar_robust_cost.cu",
    "content": "\ntemplate <class CLASS_T, class PARAMS_T>\nARRobustCostImpl<CLASS_T, PARAMS_T>::ARRobustCostImpl(cudaStream_t stream)\n  : ARStandardCostImpl<CLASS_T, PARAMS_T>(stream)\n{\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nARRobustCostImpl<CLASS_T, PARAMS_T>::~ARRobustCostImpl()\n{\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ float ARRobustCostImpl<CLASS_T, PARAMS_T>::getStabilizingCost(const float* s)\n{\n  float penalty_val = 0;\n  float slip;\n  if (fabs(s[4]) < 0.001)\n  {\n    slip = 0;\n  }\n  else\n  {\n    slip = fabs(-atan(s[5] / fabs(s[4])));\n  }\n  if (slip >= 0.75 * this->params_.max_slip_ang)\n  {\n    float slip_val = fminf(1.0, slip / this->params_.max_slip_ang);\n    float alpha = (slip_val - 0.75) / (1.0 - 0.75);\n    penalty_val = alpha * this->params_.crash_coeff;\n  }\n  // crash if roll is too large\n  if (fabs(s[3]) >= M_PI_2)\n  {\n    penalty_val = this->params_.crash_coeff;\n  }\n  return this->params_.slip_coeff * slip + penalty_val;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ float ARRobustCostImpl<CLASS_T, PARAMS_T>::getCostmapCost(const float* s)\n{\n  float cost = 0;\n\n  // Compute a transformation to get the (x,y) positions of the front and back of the car.\n#ifdef __CUDA_ARCH__\n  float x_front = s[0] + this->FRONT_D * __cosf(s[2]);\n  float y_front = s[1] + this->FRONT_D * __sinf(s[2]);\n  float x_back = s[0] + this->BACK_D * __cosf(s[2]);\n  float y_back = s[1] + this->BACK_D * __sinf(s[2]);\n#else\n  float x_front = s[0] + this->FRONT_D * cosf(s[2]);\n  float y_front = s[1] + this->FRONT_D * sinf(s[2]);\n  float x_back = s[0] + this->BACK_D * cosf(s[2]);\n  float y_back = s[1] + this->BACK_D * sinf(s[2]);\n#endif\n\n  float u, v, w;  // Transformed coordinates\n\n  // parameters for the front and back of car\n  this->coorTransform(x_front, y_front, &u, &v, &w);\n#ifdef __CUDA_ARCH__\n  float4 track_params_front = tex2D<float4>(this->costmap_tex_d_, u / w, v / 
w);\n#else\n  float2 query = make_float2(u / w * this->width_, v / w * this->height_);\n  query.x = query.x - 0.5f;\n  query.y = query.y - 0.5f;\n  query.x = fmaxf(0.0f, fminf(this->width_ - 1, query.x));\n  query.y = fmaxf(0.0f, fminf(this->height_ - 1, query.y));\n  float4 track_params_front = this->track_costs_[std::round(query.y) * this->width_ + std::round(query.x)];\n#endif\n\n  // Calculate back texture query\n  this->coorTransform(x_back, y_back, &u, &v, &w);\n#ifdef __CUDA_ARCH__\n  float4 track_params_back = tex2D<float4>(this->costmap_tex_d_, u / w, v / w);\n#else\n  query = make_float2(u / w * this->width_, v / w * this->height_);\n  query.x = query.x - 0.5f;\n  query.y = query.y - 0.5f;\n  query.x = fmaxf(0.0f, fminf(this->width_ - 1, query.x));\n  query.y = fmaxf(0.0f, fminf(this->height_ - 1, query.y));\n  float4 track_params_back = this->track_costs_[std::round(query.y) * this->width_ + std::round(query.x)];\n#endif\n  // printf(\"thread (%d %d %d) front val (%f, %f) %f back_val (%f, %f) %f\\n\", threadIdx.x, threadIdx.y, threadIdx.z,\n  // x_front, y_front, track_params_front.x, x_back, y_back, track_params_back.x);\n\n  // Calculate the constraint penalty\n  float constraint_val = fminf(1.0, fmaxf(track_params_front.x, track_params_back.x));\n\n  if (constraint_val >= this->params_.boundary_threshold)\n  {\n    float alpha = (constraint_val - this->params_.boundary_threshold) / (1.0 - this->params_.boundary_threshold);\n    cost += alpha * this->params_.crash_coeff;\n  }\n\n  // Calculate the track positioning cost\n  if (track_params_front.y > this->params_.track_slop)\n  {\n    cost += this->params_.track_coeff * track_params_front.y;\n  }\n\n  // Calculate the speed cost\n  if (this->params_.desired_speed == -1)\n  {\n    cost += this->params_.speed_coeff * fabs(s[4] - track_params_front.z);\n  }\n  else\n  {\n    cost += this->params_.speed_coeff * fabs(s[4] - this->params_.desired_speed);\n  }\n\n  // Calculate the heading cost\n  cost += 
this->params_.heading_coeff * fabs(sinf(s[2]) + track_params_front.w);\n\n  return cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\ninline __host__ __device__ float ARRobustCostImpl<CLASS_T, PARAMS_T>::computeStateCost(const float* s, int timestep,\n                                                                                       float* theta_c,\n                                                                                       int* crash_status)\n{\n  float stabilizing_cost = getStabilizingCost(s);\n  float costmap_cost = getCostmapCost(s);\n  float cost = stabilizing_cost + costmap_cost;\n  if (cost > this->MAX_COST_VALUE || isnan(cost))\n  {  // TODO Handle max cost value in a generic way\n    cost = this->MAX_COST_VALUE;\n  }\n  return cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\ninline float ARRobustCostImpl<CLASS_T, PARAMS_T>::computeStateCost(const Eigen::Ref<const output_array> y, int timestep,\n                                                                   int* crash_status)\n{\n  return computeStateCost(y.data(), timestep, nullptr, crash_status);\n}\n"
  },
  {
    "path": "include/mppi/cost_functions/autorally/ar_robust_cost.cuh",
    "content": "#ifndef AR_ROBUST_COST_CUH_\n#define AR_ROBUST_COST_CUH_\n\n#include <mppi/cost_functions/autorally/ar_standard_cost.cuh>\n\nstruct ARRobustCostParams : public ARStandardCostParams\n{\n  // Miscellaneous\n  float heading_coeff = 0.0;\n\n  ARRobustCostParams()\n  {\n    control_cost_coeff[0] = 0.0;  // steering_coeff\n    control_cost_coeff[1] = 0.0;  // throttle_coeff\n    // values, if negative ignored\n    desired_speed = -1;\n    max_slip_ang = 1.5;\n\n    // Cost term coefficients\n    track_coeff = 33.0;\n    slip_coeff = 0.0;\n    speed_coeff = 20.0;\n    // Constraint penalty thresholds\n    crash_coeff = 125000;\n    boundary_threshold = 0.75;\n    // Miscellaneous\n    track_slop = 0;\n  }\n};\n\ntemplate <class CLASS_T, class PARAMS_T = ARRobustCostParams>\nclass ARRobustCostImpl : public ARStandardCostImpl<CLASS_T, PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = ARStandardCostImpl<CLASS_T, PARAMS_T>;\n  using output_array = typename PARENT_CLASS::output_array;\n\n  ARRobustCostImpl(cudaStream_t stream = 0);  // : ARStandardCost<PARAMS_T>(steam);\n  ~ARRobustCostImpl();\n\n  std::string getCostFunctionName()\n  {\n    return \"AutoRally robust cost function\";\n  }\n\n  __host__ __device__ float getStabilizingCost(const float* s);\n  __host__ __device__ float getCostmapCost(const float* s);\n  __host__ __device__ float computeStateCost(const float* s, int timestep, float* theta_c, int* crash_status);\n\n  float computeStateCost(const Eigen::Ref<const output_array> y, int timestep, int* crash_status);\n\n  using PARENT_CLASS::terminalCost;\n\nprivate:\n};\n\n#if __CUDACC__\n#include \"ar_robust_cost.cu\"\n#endif\n\nclass ARRobustCost : public ARRobustCostImpl<ARRobustCost>\n{\npublic:\n  ARRobustCost(cudaStream_t stream = 0) : ARRobustCostImpl<ARRobustCost>(stream)\n  {\n  }\n};\n\n#endif  // AR_ROBUST_COST_CUH_\n"
  },
  {
    "path": "include/mppi/cost_functions/autorally/ar_standard_cost.cu",
    "content": "#include <mppi/cost_functions/autorally/ar_standard_cost.cuh>\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::ARStandardCostImpl(cudaStream_t stream)\n{\n  this->bindToStream(stream);\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nvoid ARStandardCostImpl<CLASS_T, PARAMS_T,  DYN_PARAMS_T>::freeCudaMem() {\n  if (this->GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaFreeArray(costmapArray_d_));\n    costmapArray_d_ = nullptr;\n  }\n  PARENT_CLASS::freeCudaMem();\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nvoid ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::paramsToDevice()\n{\n  HANDLE_ERROR(cudaMemcpyAsync(&this->cost_d_->params_, &this->params_, sizeof(PARAMS_T), cudaMemcpyHostToDevice,\n                               this->stream_));\n  HANDLE_ERROR(cudaMemcpyAsync(&this->cost_d_->width_, &width_, sizeof(float), cudaMemcpyHostToDevice, this->stream_));\n  HANDLE_ERROR(\n      cudaMemcpyAsync(&this->cost_d_->height_, &height_, sizeof(float), cudaMemcpyHostToDevice, this->stream_));\n  HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nbool ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::changeCostmapSize(int width, int height)\n{\n  // TODO set flag at top that indicates memory allocation changes\n  if (height < 0 && width < 0)\n  {\n    std::cerr << \"ERROR: cannot resize costmap to size less than 1\" << std::endl;\n    return false;\n  }\n  if (height != height_ || width != width_ || costmapArray_d_ == nullptr)\n  {\n    track_costs_.resize(width * height);\n\n    // Allocate memory for the cuda array which is bound the costmap_tex_\n    // has been allocated in the past, must be freed\n    if (height_ > 0 && width_ > 0)\n    {\n      HANDLE_ERROR(cudaFreeArray(costmapArray_d_));\n    }\n    // 4 floats of size 32 bits\n    channelDesc_ = 
cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);\n    HANDLE_ERROR(cudaMallocArray(&costmapArray_d_, &channelDesc_, width, height));\n\n    // set all of the elements in the array to be zero\n    std::vector<float4> zero_array(width_ * height_);\n    zero_array.resize(width * height, make_float4(0, 0, 0, 0));\n    HANDLE_ERROR(cudaMemcpyToArray(costmapArray_d_, 0, 0, zero_array.data(), width * height * sizeof(float4),\n                                   cudaMemcpyHostToDevice));\n  }\n\n  width_ = width;\n  height_ = height;\n  return true;\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nvoid ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::clearCostmapCPU(int width, int height)\n{\n  changeCostmapSize(width, height);\n\n  if (width_ < 0 && height_ < 0)\n  {\n    return;\n  }\n\n  for (int i = 0; i < width_ * height_; i++)\n  {\n    track_costs_[i].x = 0;\n    track_costs_[i].y = 0;\n    track_costs_[i].z = 0;\n    track_costs_[i].w = 0;\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nstd::vector<float4> ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::loadTrackData(std::string map_path)\n{\n  // check if file exists\n  if (!fileExists(map_path))\n  {\n    std::cerr << \"ERROR: map path invalid, \" << map_path << std::endl;\n    return std::vector<float4>();\n  }\n\n  // load the npz file\n  cnpy::npz_t map_dict = cnpy::npz_load(map_path);\n  float x_min, x_max, y_min, y_max, ppm;\n  float* xBounds = map_dict[\"xBounds\"].data<float>();\n  float* yBounds = map_dict[\"yBounds\"].data<float>();\n  float* pixelsPerMeter = map_dict[\"pixelsPerMeter\"].data<float>();\n  x_min = xBounds[0];\n  x_max = xBounds[1];\n  y_min = yBounds[0];\n  y_max = yBounds[1];\n  ppm = pixelsPerMeter[0];\n\n  int width = int((x_max - x_min) * ppm);\n  int height = int((y_max - y_min) * ppm);\n\n  if (!changeCostmapSize(width, height))\n  {\n    std::cerr << \"ERROR: load track has invalid sizes\" << std::endl;\n    
return std::vector<float4>();\n  }\n\n  float* channel0 = map_dict[\"channel0\"].data<float>();\n  float* channel1 = map_dict[\"channel1\"].data<float>();\n  float* channel2 = map_dict[\"channel2\"].data<float>();\n  float* channel3 = map_dict[\"channel3\"].data<float>();\n\n  // copy the track data into CPU side storage\n  for (int i = 0; i < width_ * height_; i++)\n  {\n    // std::cout << i << \" = \" << channel0[i] << \", \" << channel1[i] << \", \" << channel2[i] << \", \" << channel3[i] <<\n    // std::endl;\n    track_costs_[i].x = channel0[i];\n    track_costs_[i].y = channel1[i];\n    track_costs_[i].z = channel2[i];\n    track_costs_[i].w = channel3[i];\n  }\n\n  Eigen::Matrix3f R;\n  Eigen::Array3f trs;\n\n  // Save the scaling and offset\n  R << 1. / (x_max - x_min), 0, 0, 0, 1. / (y_max - y_min), 0, 0, 0, 1;\n  trs << -x_min / (x_max - x_min), -y_min / (y_max - y_min), 1;\n\n  updateTransform(R, trs);\n  costmapToTexture();\n\n  return track_costs_;\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nvoid ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::costmapToTexture()\n{\n  if (width_ < 0 || height_ < 0)\n  {\n    std::cerr << \"ERROR: cannot allocate texture with zero size\" << std::endl;\n    return;\n  }\n\n  // transfer CPU version of costmap to GPU\n  float4* costmap_ptr = track_costs_.data();\n  HANDLE_ERROR(\n      cudaMemcpyToArray(costmapArray_d_, 0, 0, costmap_ptr, width_ * height_ * sizeof(float4), cudaMemcpyHostToDevice));\n  cudaStreamSynchronize(this->stream_);\n\n  // Specify texture\n  struct cudaResourceDesc resDesc;\n  memset(&resDesc, 0, sizeof(resDesc));\n  resDesc.resType = cudaResourceTypeArray;\n  resDesc.res.array.array = costmapArray_d_;\n\n  // Specify texture object parameters\n  struct cudaTextureDesc texDesc;\n  memset(&texDesc, 0, sizeof(texDesc));\n  texDesc.addressMode[0] = cudaAddressModeClamp;\n  texDesc.addressMode[1] = cudaAddressModeClamp;\n  texDesc.filterMode = cudaFilterModePoint;\n  
texDesc.readMode = cudaReadModeElementType;\n  texDesc.normalizedCoords = 1;\n\n  // Destroy current texture and create new texture object\n  HANDLE_ERROR(cudaDestroyTextureObject(costmap_tex_d_));\n  HANDLE_ERROR(cudaCreateTextureObject(&costmap_tex_d_, &resDesc, &texDesc, NULL));\n\n  // copy over pointers setup up on CPU code to GPU\n  HANDLE_ERROR(cudaMemcpyAsync(&this->cost_d_->costmapArray_d_, &costmapArray_d_, sizeof(cudaArray*),\n                               cudaMemcpyHostToDevice, this->stream_));\n  HANDLE_ERROR(cudaMemcpyAsync(&this->cost_d_->costmap_tex_d_, &costmap_tex_d_, sizeof(cudaTextureObject_t),\n                               cudaMemcpyHostToDevice, this->stream_));\n  cudaStreamSynchronize(this->stream_);\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\ninline __device__ float4 ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::queryTexture(float x, float y) const\n{\n  // printf(\"\\nquerying point (%f, %f)\", x, y);\n  return tex2D<float4>(costmap_tex_d_, x, y);\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nvoid ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::updateTransform(Eigen::MatrixXf m, Eigen::ArrayXf trs)\n{\n  this->params_.r_c1.x = m(0, 0);\n  this->params_.r_c1.y = m(1, 0);\n  this->params_.r_c1.z = m(2, 0);\n  this->params_.r_c2.x = m(0, 1);\n  this->params_.r_c2.y = m(1, 1);\n  this->params_.r_c2.z = m(2, 1);\n  this->params_.trs.x = trs(0);\n  this->params_.trs.y = trs(1);\n  this->params_.trs.z = trs(2);\n  // Move the updated parameters to gpu memory\n  if (this->GPUMemStatus_)\n  {\n    paramsToDevice();\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\n__host__ __device__ void ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::coorTransform(float x, float y, float* u,\n                                                                                            float* v, float* w)\n{\n  ////Compute a projective transform of (x, y, 0, 1)\n  // 
printf(\"coordiante transform %f, %f, %f\\n\", params_.r_c1.x, params_.r_c2.x, params_.trs.x);\n  // converts to the texture [0-1] coordinate system\n  u[0] = this->params_.r_c1.x * x + this->params_.r_c2.x * y + this->params_.trs.x;\n  v[0] = this->params_.r_c1.y * x + this->params_.r_c2.y * y + this->params_.trs.y;\n  w[0] = this->params_.r_c1.z * x + this->params_.r_c2.z * y + this->params_.trs.z;\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\n__host__ __device__ float4 ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::queryTextureTransformed(float x,\n                                                                                                        float y)\n{\n  float u, v, w;\n  coorTransform(x, y, &u, &v, &w);\n  // printf(\"input coordinates: %f, %f\\n\", x, y);\n  // printf(\"\\nu = %f, v = %f, w = %f\", u, v, w);\n  // printf(\"transformed coordinates %f, %f = %f\\n\", u/w, v/w, tex2D<float4>(costmap_tex_d_, u/w, v/w).x);\n#ifdef __CUDA_ARCH__\n  return tex2D<float4>(costmap_tex_d_, u / w, v / w);\n#else\n  float2 query = make_float2(u / w * width_, v / w * height_);\n  query.x = query.x - 0.5f;\n  query.y = query.y - 0.5f;\n  query.x = fmaxf(0.0f, fminf(width_ - 1, query.x));\n  query.y = fmaxf(0.0f, fminf(height_ - 1, query.y));\n  return this->track_costs_[std::round(query.y) * width_ + std::round(query.x)];\n#endif\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nEigen::Matrix3f ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::getRotation()\n{\n  Eigen::Matrix3f m;\n  m(0, 0) = this->params_.r_c1.x;\n  m(1, 0) = this->params_.r_c1.y;\n  m(2, 0) = this->params_.r_c1.z;\n  m(0, 1) = this->params_.r_c2.x;\n  m(1, 1) = this->params_.r_c2.y;\n  m(2, 1) = this->params_.r_c2.z;\n  m(0, 2) = 0.0;\n  m(1, 2) = 0.0;\n  m(2, 2) = 1.0;\n  return m;\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nEigen::Array3f ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::getTranslation()\n{\n  
Eigen::Array3f array;\n  array(0) = this->params_.trs.x;\n  array(1) = this->params_.trs.y;\n  array(2) = this->params_.trs.z;\n  return array;\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\ninline __device__ float ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::terminalCost(float* s, float* theta_c)\n{\n  return 0.0;\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nfloat ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::terminalCost(const Eigen::Ref<const output_array> y)\n{\n  return 0.0;\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\ninline __host__ __device__ float ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::getSpeedCost(float* s, int* crash)\n{\n  float cost = 0;\n  float error = s[4] - this->params_.desired_speed;\n  if (l1_cost_)\n  {\n    cost = fabs(error);\n  }\n  else\n  {\n    cost = error * error;\n  }\n  return (this->params_.speed_coeff * cost);\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\ninline __host__ __device__ float\nARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::getStabilizingCost(float* s, int* crash_status)\n{\n  float stabilizing_cost = 0;\n  if (fabs(s[4]) > 0.001)\n  {\n    float slip = -atan(s[5] / fabs(s[4]));\n    stabilizing_cost = this->params_.slip_coeff * powf(slip, 2);\n    if (fabs(-atan(s[5] / fabs(s[4]))) > this->params_.max_slip_ang)\n    {\n      // If the slip angle is above the max slip angle kill the trajectory.\n      stabilizing_cost += this->params_.crash_coeff;\n    }\n  }\n  // if we roll over kill the trajectory\n  if (fabs(s[3]) > M_PI_2)\n  {\n    crash_status[0] = 1;\n  }\n  // printf(\"stabilizing %f\\n\", stabilizing_cost);\n  return stabilizing_cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\ninline __host__ __device__ float ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::getCrashCost(float* s, int* crash,\n                                                                       
                            int num_timestep)\n{\n  float crash_cost = 0;\n  if (crash[0] > 0)\n  {\n    crash_cost = this->params_.crash_coeff;\n  }\n  // printf(\"crash_cost %f\\n\", crash_cost);\n  return crash_cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\ninline __host__ __device__ float ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::getTrackCost(float* s, int* crash)\n{\n  float track_cost = 0;\n\n  // Compute a transformation to get the (x,y) positions of the front and back of the car.\n#ifdef __CUDA_ARCH__\n  float x_front = s[0] + this->FRONT_D * __cosf(s[2]);\n  float y_front = s[1] + this->FRONT_D * __sinf(s[2]);\n  float x_back = s[0] + this->BACK_D * __cosf(s[2]);\n  float y_back = s[1] + this->BACK_D * __sinf(s[2]);\n#else\n  float x_front = s[0] + this->FRONT_D * cosf(s[2]);\n  float y_front = s[1] + this->FRONT_D * sinf(s[2]);\n  float x_back = s[0] + this->BACK_D * cosf(s[2]);\n  float y_back = s[1] + this->BACK_D * sinf(s[2]);\n#endif\n\n  // Cost of front of the car\n  // printf(\"front before %f, %f\\n\", x_front, y_front);\n  float track_cost_front = queryTextureTransformed(x_front, y_front).x;\n  // printf(\"front after %f, %f = %f\\n\", x_front, y_front, track_cost_front);\n  // Cost for back of the car\n  // printf(\"back before %f, %f\\n\", x_back, y_back);\n  float track_cost_back = queryTextureTransformed(x_back, y_back).x;\n  // printf(\"back after %f, %f = %f\\n\", x_back, y_back, track_cost_back);\n\n  track_cost = (fabs(track_cost_front) + fabs(track_cost_back)) / 2.0;\n  if (fabs(track_cost) < this->params_.track_slop)\n  {\n    track_cost = 0;\n  }\n  else\n  {\n    track_cost = this->params_.track_coeff * track_cost;\n  }\n  if (track_cost_front >= this->params_.boundary_threshold || track_cost_back >= this->params_.boundary_threshold)\n  {\n    crash[0] = 1;\n  }\n  // printf(\"track_cost %f\\n\", track_cost);\n  return track_cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class 
DYN_PARAMS_T>\ninline __device__ float ARStandardCostImpl<CLASS_T, PARAMS_T, DYN_PARAMS_T>::computeStateCost(float* s, int timestep,\n                                                                                              float* theta_c,\n                                                                                              int* crash_status)\n{\n  // printf(\"input state %f %f %f %f %f %f %f\\n\", s[0], s[1], s[2], s[3], s[4], s[5], s[6]);\n  /*\n  int global_idx = blockDim.x * blockIdx.x + threadIdx.x;\n  if(global_idx == 0) {\n    printf(\"desired_speed %f\\n\", this->params_.desired_speed);\n    printf(\"speed_coeff %f\\n\", this->params_.speed_coeff);\n    printf(\"track_coeff %f\\n\", this->params_.track_coeff);\n    printf(\"max_slip_angle %f\\n\", this->params_.max_slip_ang);\n    printf(\"slip_coeff %f\\n\", this->params_.slip_coeff);\n    printf(\"track_slop %f\\n\", this->params_.track_slop);\n    printf(\"crash_coeff %f\\n\", this->params_.crash_coeff);\n    printf(\"discount %f\\n\", this->params_.discount);\n    printf(\"boundary_threshold %f\\n\", this->params_.boundary_threshold);\n    printf(\"grid_res %d\\n\", this->params_.grid_res);\n    printf(\"control_cost_coeff[0] %f\\n\", this->params_.control_cost_coeff[0]);\n    printf(\"control_cost_coeff[1] %f\\n\", this->params_.control_cost_coeff[1]);\n  }*/\n  float track_cost = getTrackCost(s, crash_status);\n  float speed_cost = getSpeedCost(s, crash_status);\n  // printf(\"speed %f\\n\", speed_cost);\n  float stabilizing_cost = getStabilizingCost(s, crash_status);\n  float crash_cost = powf(this->params_.discount, timestep) * getCrashCost(s, crash_status, timestep);\n  float cost = speed_cost + crash_cost + track_cost + stabilizing_cost;\n  if (cost > MAX_COST_VALUE || isnan(cost))\n  {  // TODO Handle max cost value in a generic way\n    cost = MAX_COST_VALUE;\n  }\n  return cost;\n}\n"
  },
  {
    "path": "include/mppi/cost_functions/autorally/ar_standard_cost.cuh",
    "content": "#pragma once\n\n#ifndef AR_STANDARD_COST_CUH_\n#define AR_STANDARD_COST_CUH_\n\n#include <mppi/cost_functions/cost.cuh>\n#include <mppi/dynamics/autorally/ar_nn_model.cuh>\n#include <mppi/utils/file_utils.h>\n#include <vector>\n#include <Eigen/Dense>\n#include <cuda_runtime.h>\n#include <cnpy.h>\n\nstruct ARStandardCostParams : public CostParams<2>\n{\n  float desired_speed = 6.0;\n  float speed_coeff = 4.25;\n  float track_coeff = 200.0;\n  float max_slip_ang = 1.25;\n  float slip_coeff = 10.0;\n  float track_slop = 0;\n  float crash_coeff = 10000;\n  float boundary_threshold = 0.65;\n  // TODO remove from struct\n  int grid_res = 10;\n  /*\n   * Prospective transform matrix\n   * r_c1.x, r_c2.x, trs.x\n   * r_c1.y, r_c2.y, trs.y\n   * r_c1.z, r_c2.z, trs.z\n   */\n  float3 r_c1;  // R matrix col 1\n  float3 r_c2;  // R matrix col 2\n  float3 trs;   // translation vector\n\n  ARStandardCostParams()\n  {\n    control_cost_coeff[0] = 0.0;  // steering_coeff\n    control_cost_coeff[1] = 0.0;  // throttle_coeff\n  }\n};\n\ntemplate <class CLASS_T, class PARAMS_T = ARStandardCostParams, class DYN_PARAMS_T = NNDynamicsParams>\nclass ARStandardCostImpl : public Cost<CLASS_T, PARAMS_T, DYN_PARAMS_T>\n{\npublic:\n  //  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  using PARENT_CLASS = Cost<CLASS_T, PARAMS_T, DYN_PARAMS_T>;\n  using output_array = typename PARENT_CLASS::output_array;\n  static constexpr float MAX_COST_VALUE = 1e16;\n\n  /**\n   * Constructor\n   * @param width\n   * @param height\n   */\n  ARStandardCostImpl(cudaStream_t stream = 0);\n\n  std::string getCostFunctionName() const override\n  {\n    return \"AutoRally standard cost function\";\n  }\n\n  /**\n   * Deallocates the allocated cuda memory for an object\n   */\n  void freeCudaMem();\n\n  inline __host__ __device__ int getHeight() const\n  {\n    return height_;\n  }\n  inline __host__ __device__ int getWidth() const\n  {\n    return width_;\n  }\n  inline std::vector<float4> getTrackCostCPU() 
const\n  {\n    return track_costs_;\n  }\n  inline Eigen::Matrix3f getRotation();\n  inline Eigen::Array3f getTranslation();\n  // inline __host__ __device__ cudaArray* getCudaArray() {return costmapArray_d_;}\n  // inline __host__ __device__ cudaArray_t* getCudaArrayPointer() {return &costmapArray_d_;}\n  // inline __host__ __device__ cudaTextureObject_t* getCostmapTex(){return &costmap_tex_d_;};\n\n  /**\n   * Copies the parameters to the GPU object\n   */\n  void paramsToDevice();\n\n  /**\n   * alters the costmap size in CPU storage and GPU texture\n   * @param width\n   * @param height\n   * @return\n   */\n  bool changeCostmapSize(int width, int height);\n\n  /**\n   * @brief Initializes the costmap to all zeros.\n   *\n   * Initializes a float4 vector to the correct width and height and sets every value to zero on the CPU.\n   * default leaves the sizes alone\n   */\n  void clearCostmapCPU(int width = -1, int height = -1);\n\n  /**\n   * @brief Binds the member variable costmap to a CUDA texture.\n   */\n  void costmapToTexture();\n\n  __device__ float4 queryTexture(float x, float y) const;\n\n  /**\n   * @brief Loads track data from a file.\n   * @param C-string representing the path to the costmap data file.\n   * @param h Matrix representing a transform from world to (offset) costmap coordinates.\n   * @param trs Array representing the offset.\n   */\n  std::vector<float4> loadTrackData(std::string map_path);\n\n  /**\n   * @brief Updates the current costmap coordinate transform.\n   * @param h Matrix representing a transform from world to (offset) costmap coordinates.\n   * @param trs Array representing the offset.\n   */\n  void updateTransform(Eigen::MatrixXf m, Eigen::ArrayXf trs);\n\n  /**\n   * @brief Compute a coordinate transform going from world to costmap coordinates.\n   */\n  __host__ __device__ void coorTransform(float x, float y, float* u, float* v, float* w);\n\n  /**\n   * Queries the texture using coorTransform beforehand\n   */\n  
__host__ __device__ float4 queryTextureTransformed(float x, float y);\n\n  /**\n   *@brief Initializes the debug window for a default 20x20 meter window.\n   */\n  // void debugDisplayInit();\n\n  /**\n   * @brief Initialize and allocate memory for debug window display\n   */\n  // void debugDisplayInit(int width_m, int height_m, int ppm);\n\n  bool getDebugDisplayEnabled()\n  {\n    return false;\n  }\n\n  /**\n   * @brief Display the debug view centered around x and y.\n   * @param x float representing the current x-coordinate\n   * @param y float representing the current y-coordinate\n   */\n  // cv::Mat getDebugDisplay(float x, float y, float heading);\n\n  /**\n   *\n   * @param description\n   * @param data\n   */\n  // void updateCostmap(std::vector<int> description, std::vector<float> data);\n\n  /**\n   *\n   * @param description\n   * @param data\n   */\n  // void updateObstacles(std::vector<int> description, std::vector<float> data);\n\n  /**\n   * @brief Returns whether or not the vehicle has crashed or not\n   */\n  //__host__ __device__ void getCrash(float* state, int* crash);\n\n  /**\n   * @brief Compute the cost for achieving a desired speed\n   */\n  __host__ __device__ float getSpeedCost(float* s, int* crash);\n\n  /**\n   * @brief Compute a penalty term for crashing\n   */\n  __host__ __device__ float getCrashCost(float* s, int* crash, int num_timestep);\n\n  /**\n   * @brief Compute some cost terms that help stabilize the car.\n   */\n  __host__ __device__ float getStabilizingCost(float* s, int* crash);\n\n  /**\n   * @brief Compute the current track cost based on the costmap.\n   * Requires using CUDA texture memory so can only be run on the GPU\n   */\n  __host__ __device__ float getTrackCost(float* s, int* crash);\n\n  /**\n   * @brief Compute all of the individual cost terms and adds them together.\n   */\n  inline __device__ float computeStateCost(float* s, int timestep, float* theta_c, int* crash_status);\n\n  /**\n   * @brief Computes 
the terminal cost from a state\n   */\n  __device__ float terminalCost(float* s, float* theta_c);\n\n  float terminalCost(const Eigen::Ref<const output_array> y);\n\n  // Constant variables\n  const float FRONT_D = 0.5;  ///< Distance from GPS receiver to front of car.\n  const float BACK_D = -0.5;  ///< Distance from GPS receiver to back of car.\n\nprotected:\n  bool l1_cost_ = false;  // Whether to use L1 speed cost (if false it is L2)\n\n  // Primary variables\n  int width_ = -1;                     ///< width of costmap\n  int height_ = -1;                    ///< height of costmap.\n  cudaArray* costmapArray_d_;          ///< Cuda array for texture binding.\n  cudaChannelFormatDesc channelDesc_;  ///< Cuda texture channel description.\n  cudaTextureObject_t costmap_tex_d_;  ///< Cuda texture object.\n  // TODO what does this look like on GPU side\n  std::vector<float4> track_costs_;\n\n  // Debugging variables\n  float* debug_data_;  ///< Host array for holding debug info.\n  // float* debug_data_d_; ///< Device array for holding debug info.\n  int debug_img_width_;   /// Width (in meters) of area imaged by debug view.\n  int debug_img_height_;  ///< Height (in meters) of area imaged by debug view.\n  int debug_img_ppm_;     ///< Pixels per meter for resolution of debug view.\n  int debug_img_size_;    ///< Number of pixels in the debug image.\n  bool debugging_;        ///< Indicator for if we're in debugging mode\n};\n\n#if __CUDACC__\n#include \"ar_standard_cost.cu\"\n#endif\n\nclass ARStandardCost : public ARStandardCostImpl<ARStandardCost>\n{\npublic:\n  ARStandardCost(cudaStream_t stream = 0) : ARStandardCostImpl<ARStandardCost>(stream){};\n};\n\n#endif  // AR_STANDARD_COST_CUH_\n"
  },
  {
    "path": "include/mppi/cost_functions/cartpole/cartpole_quadratic_cost.cu",
    "content": "#include <mppi/cost_functions/cartpole/cartpole_quadratic_cost.cuh>\n\nCartpoleQuadraticCost::CartpoleQuadraticCost(cudaStream_t stream)\n{\n  bindToStream(stream);\n}\n\nfloat CartpoleQuadraticCost::computeStateCost(const Eigen::Ref<const output_array> s, int timestep, int* crash_status)\n{\n  return (s[0] - params_.desired_terminal_state[0]) * (s[0] - params_.desired_terminal_state[0]) *\n             params_.cart_position_coeff +\n         (s[1] - params_.desired_terminal_state[1]) * (s[1] - params_.desired_terminal_state[1]) *\n             params_.cart_velocity_coeff +\n         (s[2] - params_.desired_terminal_state[2]) * (s[2] - params_.desired_terminal_state[2]) *\n             params_.pole_angle_coeff +\n         (s[3] - params_.desired_terminal_state[3]) * (s[3] - params_.desired_terminal_state[3]) *\n             params_.pole_angular_velocity_coeff;\n}\n\n__device__ float CartpoleQuadraticCost::computeStateCost(float* state, int timestep, float* theta_c, int* crash_status)\n{\n  return (state[0] - params_.desired_terminal_state[0]) * (state[0] - params_.desired_terminal_state[0]) *\n             params_.cart_position_coeff +\n         (state[1] - params_.desired_terminal_state[1]) * (state[1] - params_.desired_terminal_state[1]) *\n             params_.cart_velocity_coeff +\n         (state[2] - params_.desired_terminal_state[2]) * (state[2] - params_.desired_terminal_state[2]) *\n             params_.pole_angle_coeff +\n         (state[3] - params_.desired_terminal_state[3]) * (state[3] - params_.desired_terminal_state[3]) *\n             params_.pole_angular_velocity_coeff;\n}\n\n__device__ float CartpoleQuadraticCost::terminalCost(float* state, float* theta_c)\n{\n  return ((state[0] - params_.desired_terminal_state[0]) * (state[0] - params_.desired_terminal_state[0]) *\n              params_.cart_position_coeff +\n          (state[1] - params_.desired_terminal_state[1]) * (state[1] - params_.desired_terminal_state[1]) *\n              
params_.cart_velocity_coeff +\n          (state[2] - params_.desired_terminal_state[2]) * (state[2] - params_.desired_terminal_state[2]) *\n              params_.pole_angle_coeff +\n          (state[3] - params_.desired_terminal_state[3]) * (state[3] - params_.desired_terminal_state[3]) *\n              params_.pole_angular_velocity_coeff) *\n         params_.terminal_cost_coeff;\n}\nfloat CartpoleQuadraticCost::terminalCost(const Eigen::Ref<const output_array> state)\n{\n  return ((state[0] - params_.desired_terminal_state[0]) * (state[0] - params_.desired_terminal_state[0]) *\n              params_.cart_position_coeff +\n          (state[1] - params_.desired_terminal_state[1]) * (state[1] - params_.desired_terminal_state[1]) *\n              params_.cart_velocity_coeff +\n          (state[2] - params_.desired_terminal_state[2]) * (state[2] - params_.desired_terminal_state[2]) *\n              params_.pole_angle_coeff +\n          (state[3] - params_.desired_terminal_state[3]) * (state[3] - params_.desired_terminal_state[3]) *\n              params_.pole_angular_velocity_coeff) *\n         params_.terminal_cost_coeff;\n}\n"
  },
  {
    "path": "include/mppi/cost_functions/cartpole/cartpole_quadratic_cost.cuh",
    "content": "#pragma once\n\n#ifndef CARTPOLE_QUADRATIC_COST_CUH_\n#define CARTPOLE_QUADRATIC_COST_CUH_\n\n#include <mppi/cost_functions/cost.cuh>\n#include <mppi/dynamics/cartpole/cartpole_dynamics.cuh>\n#include <mppi/utils/file_utils.h>\n\nstruct CartpoleQuadraticCostParams : public CostParams<1>\n{\n  float cart_position_coeff = 1000;\n  float cart_velocity_coeff = 100;\n  float pole_angle_coeff = 2000;\n  float pole_angular_velocity_coeff = 100;\n  float terminal_cost_coeff = 0;\n  float desired_terminal_state[4] = { 0, 0, M_PI, 0 };\n\n  CartpoleQuadraticCostParams()\n  {\n    this->control_cost_coeff[0] = 10.0;\n  }\n};\n\nclass CartpoleQuadraticCost : public Cost<CartpoleQuadraticCost, CartpoleQuadraticCostParams, CartpoleDynamicsParams>\n{\npublic:\n  /**\n   * Constructor\n   * @param width\n   * @param height\n   */\n  CartpoleQuadraticCost(cudaStream_t stream = 0);\n\n  /**\n   * @brief Compute the state cost\n   */\n  __device__ float computeStateCost(float* s, int timestep = 0, float* theta_c = nullptr, int* crash_status = nullptr);\n\n  /**\n   * @brief Compute the state cost on the CPU\n   */\n  float computeStateCost(const Eigen::Ref<const output_array> s, int timestep = 0, int* crash_status = nullptr);\n\n  /**\n   * @brief Compute the terminal cost of the system\n   */\n  __device__ float terminalCost(float* s, float* theta_c);\n\n  float terminalCost(const Eigen::Ref<const output_array> s);\n\nprotected:\n};\n\n#if __CUDACC__\n#include \"cartpole_quadratic_cost.cu\"\n#endif\n\n#endif  // CARTPOLE_QUADRATIC_COST_CUH_// Include the cart pole cost.\n"
  },
  {
    "path": "include/mppi/cost_functions/cost.cu",
    "content": "#include <mppi/cost_functions/cost.cuh>\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nvoid Cost<CLASS_T, PARAMS_T, DYN_PARAMS_T>::paramsToDevice()\n{\n  if (GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(&cost_d_->params_, &params_, sizeof(PARAMS_T), cudaMemcpyHostToDevice, stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(stream_));\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nvoid Cost<CLASS_T, PARAMS_T, DYN_PARAMS_T>::freeCudaMem()\n{\n  if (GPUMemStatus_)\n  {\n    cudaFree(cost_d_);\n    GPUMemStatus_ = false;\n    cost_d_ = nullptr;\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nvoid Cost<CLASS_T, PARAMS_T, DYN_PARAMS_T>::GPUSetup()\n{\n  CLASS_T* derived = static_cast<CLASS_T*>(this);\n  if (!GPUMemStatus_)\n  {\n    cost_d_ = Managed::GPUSetup<CLASS_T>(derived);\n  }\n  else\n  {\n    this->logger_->debug(\"%s: GPU Memory already set.\\n\", derived->getCostFunctionName().c_str());\n  }\n  derived->paramsToDevice();\n}\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\n__device__ float Cost<CLASS_T, PARAMS_T, DYN_PARAMS_T>::computeRunningCost(float* y, float* u, int timestep,\n                                                                           float* theta_c, int* crash)\n{\n  if (threadIdx.y == 0)\n  {\n    CLASS_T* derived = static_cast<CLASS_T*>(this);\n    return derived->computeStateCost(y, timestep, theta_c, crash) +\n           derived->computeControlCost(u, timestep, theta_c, crash);\n  }\n  else\n  {\n    return 0.0f;\n  }\n}\n"
  },
  {
    "path": "include/mppi/cost_functions/cost.cuh",
    "content": "#pragma once\n/*\nHeader file for costs\n*/\n\n#ifndef COSTS_CUH_\n#define COSTS_CUH_\n\n#include <Eigen/Dense>\n#include <stdio.h>\n#include <math.h>\n#include <mppi/dynamics/dynamics.cuh>\n#include <mppi/utils/managed.cuh>\n\n#include <stdexcept>\n\ntemplate <int C_DIM>\nstruct CostParams\n{\n  static const int CONTROL_DIM = C_DIM;\n  float control_cost_coeff[C_DIM];\n  float discount = 1.0f;\n  CostParams()\n  {\n    // Default set all controls to 1\n    for (int i = 0; i < C_DIM; ++i)\n    {\n      control_cost_coeff[i] = 1.0f;\n    }\n  }\n};\n\n// https://cboard.cprogramming.com/cplusplus-programming/122412-crtp-how-pass-type.html\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T = DynamicsParams>\nclass Cost : public Managed\n{\npublic:\n  //  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  /**\n   * typedefs for access to templated class from outside classes\n   */\n  using ControlIndex = typename DYN_PARAMS_T::ControlIndex;\n  using OutputIndex = typename DYN_PARAMS_T::OutputIndex;\n  using TEMPLATED_DYN_PARAMS = DYN_PARAMS_T;\n  static const int CONTROL_DIM = E_INDEX(ControlIndex, NUM_CONTROLS);\n  static const int OUTPUT_DIM = E_INDEX(OutputIndex, NUM_OUTPUTS);  // TODO\n  typedef CLASS_T COST_T;\n  typedef PARAMS_T COST_PARAMS_T;\n  typedef Eigen::Matrix<float, CONTROL_DIM, 1> control_array;             // Control at a time t\n  typedef Eigen::Matrix<float, CONTROL_DIM, CONTROL_DIM> control_matrix;  // Control at a time t\n  typedef Eigen::Matrix<float, OUTPUT_DIM, 1> output_array;               // Output at a time t\n\n  Cost() = default;\n  /**\n   * Destructor must be virtual so that children are properly\n   * destroyed when called from a basePlant reference\n   */\n  virtual ~Cost()\n  {\n    freeCudaMem();\n  }\n\n  virtual std::string getCostFunctionName() const\n  {\n    return \"cost function name not set\";\n  }\n\n  void GPUSetup();\n\n  bool getDebugDisplayEnabled()\n  {\n    return false;\n  }\n\n  /**\n   * Updates the cost 
parameters\n   * @param params\n   */\n  void setParams(const PARAMS_T& params)\n  {\n    params_ = params;\n    if (GPUMemStatus_)\n    {\n      CLASS_T& derived = static_cast<CLASS_T&>(*this);\n      derived.paramsToDevice();\n    }\n  }\n\n  __host__ __device__ PARAMS_T getParams()\n  {\n    return params_;\n  }\n\n  void paramsToDevice();\n\n  /**\n   *\n   * @param description\n   * @param data\n   */\n  void updateCostmap(std::vector<int> description, std::vector<float> data){};\n\n  /**\n   * deallocates the allocated cuda memory for an object\n   */\n  void freeCudaMem();\n\n  /**\n   * Computes the feedback control cost on CPU for RMPPI\n   */\n  float computeFeedbackCost(const Eigen::Ref<const control_array> fb_u, const Eigen::Ref<const control_array> std_dev,\n                            const float lambda = 1.0f, const float alpha = 0.0f)\n  {\n    float cost = 0.0f;\n    for (int i = 0; i < CONTROL_DIM; i++)\n    {\n      cost += params_.control_cost_coeff[i] * SQ(fb_u(i)) / SQ(std_dev(i));\n    }\n\n    return 0.5f * lambda * (1.0f - alpha) * cost;\n  }\n\n  /**\n   * Computes the control cost on CPU. This is the normal control cost calculation\n   * in MPPI and Tube-MPPI\n   */\n  float computeControlCost(const Eigen::Ref<const control_array> u, int timestep, int* crash)\n  {\n    return 0.0f;\n  }\n  // =================== METHODS THAT SHOULD HAVE NO DEFAULT ==========================\n  /**\n   * Computes the state cost on the CPU. Should be implemented in subclasses\n   */\n  float computeStateCost(const Eigen::Ref<const output_array> y, int timestep, int* crash_status)\n  {\n    throw std::logic_error(\"SubClass did not implement computeStateCost\");\n  }\n\n  /**\n   *\n   * @param s current state as a float array\n   * @return state cost on GPU\n   */\n  __device__ float computeStateCost(float* y, int timestep, float* theta_c, int* crash_status);\n\n  /**\n   * Computes the state cost on the CPU. 
Should be implemented in subclasses\n   */\n  float terminalCost(const Eigen::Ref<const output_array> y)\n  {\n    throw std::logic_error(\"SubClass did not implement terminalCost\");\n  }\n\n  /**\n   *\n   * @param s terminal state as float array\n   * @return terminal cost on GPU\n   */\n  __device__ float terminalCost(float* y, float* theta_c);\n\n  /**\n   * Method to allow setup of costs on the CPU. This is needed for\n   * initializing the memory of an LSTM for example\n   */\n  void initializeCosts(const Eigen::Ref<const output_array>& output, const Eigen::Ref<const control_array>& control,\n                       float t_0, float dt)\n  {\n  }\n\n  /**\n   * Method to allow setup of costs on the GPU. This is needed for\n   * initializing the memory of an LSTM for example\n   */\n  __device__ void initializeCosts(float* output, float* control, float* theta_c, float t_0, float dt)\n  {\n  }\n\n  // ================ END OF METHODS WITH NO DEFAULT ===========================\n\n  // =================== METHODS THAT SHOULD NOT BE OVERWRITTEN ================\n  /**\n   * Computes the feedback control cost on GPU used in RMPPI. 
There is an\n   * assumption that we are provided std_dev and the covariance matrix is\n   * diagonal.\n   */\n  __device__ float computeFeedbackCost(float* fb_u, float* std_dev, float lambda = 1.0f, float alpha = 0.0f)\n  {\n    float cost = 0.0f;\n    for (int i = 0; i < CONTROL_DIM; i++)\n    {\n      cost += params_.control_cost_coeff[i] * SQ(fb_u[i] / std_dev[i]);\n    }\n    return 0.5f * lambda * (1.0f - alpha) * cost;\n  }\n\n  /**\n   * Computes the normal control cost for MPPI and Tube-MPPI\n   * 0.5 * lambda * (u*^T \\Sigma^{-1} u*^T + 2 * u*^T \\Sigma^{-1} (u*^T + noise))\n   * On the GPU, u = u* + noise already, so we need the following to create\n   * the original cost:\n   * 0.5 * lambda * (u - noise)^T \\Sigma^{-1} (u + noise)\n   */\n  __device__ float computeControlCost(float* u, int timestep, float* theta_c, int* crash)\n  {\n    return 0.0f;\n  }\n  // =================== END METHODS THAT SHOULD NOT BE OVERWRITTEN ============\n\n  // =================== METHODS THAT CAN BE OVERWRITTEN =======================\n  float computeRunningCost(const Eigen::Ref<const output_array> y, const Eigen::Ref<const control_array> u,\n                           int timestep, int* crash)\n  {\n    CLASS_T* derived = static_cast<CLASS_T*>(this);\n\n    return derived->computeStateCost(y, timestep, crash) +\n           derived->computeControlCost(u, timestep, crash);\n  }\n\n  __device__ float computeRunningCost(float* y, float* u,\n                                      int timestep, float* theta_c, int* crash);\n  // =================== END METHODS THAT CAN BE OVERWRITTEN ===================\n\n  inline __host__ __device__ PARAMS_T getParams() const\n  {\n    return params_;\n  }\n\n  CLASS_T* cost_d_ = nullptr;\n\nprotected:\n  PARAMS_T params_;\n};\n\n#if __CUDACC__\n#include \"cost.cu\"\n#endif\n\ntemplate <class CLASS_T, class PARAMS_T, class DYN_PARAMS_T>\nconst int Cost<CLASS_T, PARAMS_T, DYN_PARAMS_T>::CONTROL_DIM;\n\ntemplate <class CLASS_T, class PARAMS_T, 
class DYN_PARAMS_T>\nconst int Cost<CLASS_T, PARAMS_T, DYN_PARAMS_T>::OUTPUT_DIM;\n#endif  // COSTS_CUH_\n"
  },
  {
    "path": "include/mppi/cost_functions/double_integrator/double_integrator_circle_cost.cu",
    "content": "#include <mppi/cost_functions/double_integrator/double_integrator_circle_cost.cuh>\n\nDoubleIntegratorCircleCost::DoubleIntegratorCircleCost(cudaStream_t stream)\n{\n  bindToStream(stream);\n}\n\n__device__ float DoubleIntegratorCircleCost::computeStateCost(float* s, int timestep, float* theta_c, int* crash_status)\n{\n  float radial_position = s[0] * s[0] + s[1] * s[1];\n  float current_velocity = sqrtf(s[2] * s[2] + s[3] * s[3]);\n  float current_angular_momentum = s[0] * s[3] - s[1] * s[2];\n\n  float cost = 0;\n  //  if ((radial_position < params_.inner_path_radius2) ||\n  //      (radial_position > params_.outer_path_radius2)) {\n  //    crash_status[0] = 1; // Indicates the system has crashed.\n  //  }\n  //\n  //  if (crash_status[0] > 0) { // If we've crashed once, constantly add the crash cost.\n  //    cost += powf(this->params_.discount, timestep)*params_.crash_cost;\n  //  }\n\n  if ((radial_position < params_.inner_path_radius2) || (radial_position > params_.outer_path_radius2))\n  {\n    cost += powf(this->params_.discount, timestep) * params_.crash_cost;\n  }\n\n  cost += params_.velocity_cost * abs(current_velocity - params_.velocity_desired);\n  cost += params_.velocity_cost * abs(current_angular_momentum - params_.angular_momentum_desired);\n  return cost;\n}\n\nfloat DoubleIntegratorCircleCost::computeStateCost(const Eigen::Ref<const output_array> s, int timestep,\n                                                   int* crash_status)\n{\n  float radial_position = s[0] * s[0] + s[1] * s[1];\n  float current_velocity = sqrtf(s[2] * s[2] + s[3] * s[3]);\n  float current_angular_momentum = s[0] * s[3] - s[1] * s[2];\n  float cost = 0;\n\n  //  if ((radial_position < params_.inner_path_radius2) ||\n  //      (radial_position > params_.outer_path_radius2)) {\n  //    crash_status[0] = 1; // Indicates the system has crashed.\n  //  }\n  //\n  //  if (crash_status[0] > 0) { // If we've crashed once, constantly add the crash cost.\n  //    
cost += powf(this->params_.discount, timestep)*params_.crash_cost;\n  //  }\n\n  if ((radial_position < params_.inner_path_radius2) || (radial_position > params_.outer_path_radius2))\n  {\n    cost += powf(this->params_.discount, timestep) * params_.crash_cost;\n  }\n\n  cost += params_.velocity_cost * abs(current_velocity - params_.velocity_desired);\n  cost += params_.velocity_cost * abs(current_angular_momentum - params_.angular_momentum_desired);\n  return cost;\n}\n\nfloat DoubleIntegratorCircleCost::terminalCost(const Eigen::Ref<const output_array> s)\n{\n  return 0;\n}\n\n__device__ float DoubleIntegratorCircleCost::terminalCost(float* state, float* theta_c)\n{\n  return 0;\n}\n"
  },
  {
    "path": "include/mppi/cost_functions/double_integrator/double_integrator_circle_cost.cuh",
    "content": "#pragma once\n#ifndef DOUBLE_INTEGRATOR_CIRCLE_COST_CUH_\n#define DOUBLE_INTEGRATOR_CIRCLE_COST_CUH_\n\n#include <mppi/cost_functions/cost.cuh>\n#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n\nstruct DoubleIntegratorCircleCostParams : public CostParams<2>\n{\n  float velocity_cost = 1;\n  float crash_cost = 1000;\n  float velocity_desired = 2;\n  float inner_path_radius2 = 1.875 * 1.875;\n  float outer_path_radius2 = 2.125 * 2.125;\n  float angular_momentum_desired = 2 * velocity_desired;  // Enforces the system travels counter clockwise\n\n  DoubleIntegratorCircleCostParams()\n  {\n    control_cost_coeff[0] = 0.01;\n    control_cost_coeff[1] = 0.01;\n    discount = 1.0;\n  }\n};\n\nclass DoubleIntegratorCircleCost\n  : public Cost<DoubleIntegratorCircleCost, DoubleIntegratorCircleCostParams, DoubleIntegratorParams>\n{\npublic:\n  DoubleIntegratorCircleCost(cudaStream_t stream = nullptr);\n\n  float computeStateCost(const Eigen::Ref<const output_array> s, int timestep = 0, int* crash_status = nullptr);\n  float terminalCost(const Eigen::Ref<const output_array> s);\n\n  __device__ float computeStateCost(float* s, int timestep = 0, float* theta_c = nullptr, int* crash_status = nullptr);\n  __device__ float terminalCost(float* s, float* theta_c);\n};\n\n#if __CUDACC__\n#include \"double_integrator_circle_cost.cu\"\n#endif\n\n#endif  //! DOUBLE_INTEGRATOR_CIRCLE_COST_CUH_\n"
  },
  {
    "path": "include/mppi/cost_functions/double_integrator/double_integrator_robust_cost.cu",
    "content": "#include <mppi/cost_functions/double_integrator/double_integrator_robust_cost.cuh>\n#include <mppi/utils/math_utils.h>\n\nDoubleIntegratorRobustCost::DoubleIntegratorRobustCost(cudaStream_t stream)\n{\n  bindToStream(stream);\n}\n\n__device__ float DoubleIntegratorRobustCost::computeStateCost(float* s, int timestep, float* theta_c, int* crash_status)\n{\n  float radial_position = s[0] * s[0] + s[1] * s[1];\n  float current_velocity = sqrtf(s[2] * s[2] + s[3] * s[3]);\n  float current_angular_momentum = s[0] * s[3] - s[1] * s[2];\n\n  float cost = 0;\n  float normalized_dist_from_center = mppi::math::normDistFromCenter(\n      sqrt(radial_position), sqrt(params_.inner_path_radius2), sqrt(params_.outer_path_radius2));\n  float steep_percent_boundary = 0.5;\n  float steep_cost = 0.5 * params_.crash_cost;  // 10 percent of crash cost\n  // Shallow cost region\n  if (normalized_dist_from_center <= steep_percent_boundary)\n  {\n    cost += mppi::math::linInterp(normalized_dist_from_center, 0, steep_percent_boundary, 0, steep_cost);\n  }\n  // Steep cost region\n  if (normalized_dist_from_center > steep_percent_boundary && normalized_dist_from_center <= 1.0)\n  {\n    cost +=\n        mppi::math::linInterp(normalized_dist_from_center, steep_percent_boundary, 1, steep_cost, params_.crash_cost);\n  }\n  // Crash Cost region\n  if (normalized_dist_from_center > 1.0)\n  {\n    cost += params_.crash_cost;\n  }\n  cost += params_.velocity_cost * powf(current_velocity - params_.velocity_desired, 2);\n  cost += params_.velocity_cost * powf(current_angular_momentum - params_.angular_momentum_desired, 2);\n  return cost;\n}\n\nfloat DoubleIntegratorRobustCost::computeStateCost(const Eigen::Ref<const output_array> s, int timestep,\n                                                   int* crash_status)\n{\n  float radial_position = s[0] * s[0] + s[1] * s[1];\n  float current_velocity = sqrtf(s[2] * s[2] + s[3] * s[3]);\n  float current_angular_momentum = s[0] * s[3] - 
s[1] * s[2];\n\n  float cost = 0;\n  float normalized_dist_from_center = mppi::math::normDistFromCenter(\n      sqrt(radial_position), sqrt(params_.inner_path_radius2), sqrt(params_.outer_path_radius2));\n  float steep_percent_boundary = 0.75;\n  float steep_cost = 0.1 * params_.crash_cost;\n  if (normalized_dist_from_center <= steep_percent_boundary)\n  {\n    cost += mppi::math::linInterp(normalized_dist_from_center, 0, steep_percent_boundary, 0, steep_cost);\n  }\n  else if (normalized_dist_from_center > steep_percent_boundary && normalized_dist_from_center <= 1.0)\n  {\n    cost +=\n        mppi::math::linInterp(normalized_dist_from_center, steep_percent_boundary, 1, steep_cost, params_.crash_cost);\n  }\n  else\n  {\n    cost += params_.crash_cost;\n  }\n  cost += params_.velocity_cost * powf(current_velocity - params_.velocity_desired, 2);\n  cost += params_.velocity_cost * powf(current_angular_momentum - params_.angular_momentum_desired, 2);\n  return cost;\n}\n\nfloat DoubleIntegratorRobustCost::terminalCost(const Eigen::Ref<const output_array> s)\n{\n  return 0;\n}\n\n__device__ float DoubleIntegratorRobustCost::terminalCost(float* s, float* theta_c)\n{\n  return 0;\n}\n"
  },
  {
    "path": "include/mppi/cost_functions/double_integrator/double_integrator_robust_cost.cuh",
    "content": "#ifndef DOUBLE_INTEGRATOR_ROBUST_COST_CUH_\n#define DOUBLE_INTEGRATOR_ROBUST_COST_CUH_\n\n#include <mppi/cost_functions/double_integrator/double_integrator_circle_cost.cuh>\n\nclass DoubleIntegratorRobustCost\n  : public Cost<DoubleIntegratorRobustCost, DoubleIntegratorCircleCostParams, DoubleIntegratorParams>\n{\npublic:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  DoubleIntegratorRobustCost(cudaStream_t stream = nullptr);\n\n  float computeStateCost(const Eigen::Ref<const output_array> s, int timestep = 0, int* crash_status = nullptr);\n  float terminalCost(const Eigen::Ref<const output_array> s);\n\n  __device__ float computeStateCost(float* s, int timestep = 0, float* theta_c = nullptr, int* crash_status = nullptr);\n  __device__ float terminalCost(float* s, float* theta_c);\n\n  float getLipshitzConstantCost()\n  {\n    return params_.crash_cost;\n  };\n};\n\n#if __CUDACC__\n#include \"double_integrator_robust_cost.cu\"\n#endif\n\n#endif  //! DOUBLE_INTEGRATOR_ROBUST_COST_CUH_\n"
  },
  {
    "path": "include/mppi/cost_functions/quadratic_cost/quadratic_cost.cu",
    "content": "#include <mppi/cost_functions/quadratic_cost/quadratic_cost.cuh>\n\ntemplate <class CLASS_T, class DYN_T, class PARAMS_T>\nQuadraticCostImpl<CLASS_T, DYN_T, PARAMS_T>::QuadraticCostImpl(cudaStream_t stream)\n{\n  this->bindToStream(stream);\n}\n\ntemplate <class CLASS_T, class DYN_T, class PARAMS_T>\nfloat QuadraticCostImpl<CLASS_T, DYN_T, PARAMS_T>::computeStateCost(const Eigen::Ref<const output_array> s,\n                                                                    int timestep, int* crash_status)\n{\n  float cost = 0;\n  output_array des_state = this->params_.getDesiredState(timestep);\n\n  Eigen::Matrix<float, DYN_T::OUTPUT_DIM, DYN_T::OUTPUT_DIM> coeffs;\n  coeffs = Eigen::Matrix<float, DYN_T::OUTPUT_DIM, DYN_T::OUTPUT_DIM>::Zero();\n  for (int i = 0; i < DYN_T::OUTPUT_DIM; i++)\n  {\n    coeffs(i, i) = this->params_.s_coeffs[i];\n  }\n  output_array error = s - des_state;\n  cost = error.transpose() * coeffs * error;\n\n  return cost;\n}\n\ntemplate <class CLASS_T, class DYN_T, class PARAMS_T>\nfloat QuadraticCostImpl<CLASS_T, DYN_T, PARAMS_T>::terminalCost(const Eigen::Ref<const output_array> s)\n{\n  return 0.0;\n}\n\ntemplate <class CLASS_T, class DYN_T, class PARAMS_T>\n__device__ float QuadraticCostImpl<CLASS_T, DYN_T, PARAMS_T>::computeStateCost(float* s, int timestep, float* theta_c,\n                                                                               int* crash_status)\n{\n  float cost = 0;\n\n  float* desired_state = this->params_.getGoalStatePointer(timestep);\n\n  for (int i = 0; i < DYN_T::OUTPUT_DIM; i++)\n  {\n    cost += powf(s[i] - desired_state[i], 2) * this->params_.s_coeffs[i];\n  }\n\n  return cost;\n}\n\ntemplate <class CLASS_T, class DYN_T, class PARAMS_T>\n__device__ float QuadraticCostImpl<CLASS_T, DYN_T, PARAMS_T>::terminalCost(float* s, float* theta_c)\n{\n  return 0.0;\n}\n"
  },
  {
    "path": "include/mppi/cost_functions/quadratic_cost/quadratic_cost.cuh",
    "content": "#pragma once\n/*\n * Created on Wed Dec 16 2020 by Bogdan\n */\n\n#ifndef MPPI_COST_FUNCTIONS_QUADRATIC_COST_CUH_\n#define MPPI_COST_FUNCTIONS_QUADRATIC_COST_CUH_\n\n#include <mppi/cost_functions/cost.cuh>\n#include <mppi/utils/math_utils.h>\n\ntemplate <class DYN_T, int SIM_TIME_HORIZON = 1>\nstruct QuadraticCostTrajectoryParams : public CostParams<DYN_T::CONTROL_DIM>\n{\n  typedef DYN_T TEMPLATED_DYN;\n  static const int TIME_HORIZON = SIM_TIME_HORIZON;\n  /**\n   * Defines a general desired state and coeffs\n   */\n  float s_goal[DYN_T::OUTPUT_DIM * SIM_TIME_HORIZON] = { 0 };\n\n  float s_coeffs[DYN_T::OUTPUT_DIM] = { 0 };\n\n  int current_time = 0;\n\n  QuadraticCostTrajectoryParams()\n  {\n    for (int i = 0; i < DYN_T::CONTROL_DIM; i++)\n    {\n      this->control_cost_coeff[i] = 0;\n    }\n    for (int i = 0; i < DYN_T::OUTPUT_DIM; i++)\n    {\n      this->s_coeffs[i] = 1;\n    }\n  }\n\n  const Eigen::Matrix<float, DYN_T::OUTPUT_DIM, 1> getDesiredState(int t)\n  {\n    Eigen::Matrix<float, DYN_T::OUTPUT_DIM, 1> s(s_goal + this->getIndex(t));\n    return s;\n  }\n\n  __device__ float* getGoalStatePointer(int t)\n  {\n    return s_goal + this->getIndex(t);\n  }\n\n  __host__ __device__ int getIndex(int t)\n  {\n    int index = (this->current_time + t);\n    if (index >= TIME_HORIZON)\n    {\n      index = (TIME_HORIZON - 1);\n    }\n    index *= DYN_T::OUTPUT_DIM;\n    return index;\n  }\n  __host__ __device__ void setCurrentTime(int new_time)\n  {\n    current_time = new_time;\n  }\n};\n\ntemplate <class CLASS_T, class DYN_T, class PARAMS_T>\nclass QuadraticCostImpl : public Cost<CLASS_T, PARAMS_T, typename DYN_T::DYN_PARAMS_T>\n{\npublic:\n  typedef Cost<CLASS_T, PARAMS_T, typename DYN_T::DYN_PARAMS_T> PARENT_CLASS;\n  using output_array = typename PARENT_CLASS::output_array;\n  QuadraticCostImpl(cudaStream_t stream = nullptr);\n  static constexpr float MAX_COST_VALUE = 1e16;\n\n  /**\n   * Host Functions\n   */\n  virtual std::string 
getCostFunctionName() const override\n  {\n    return \"Quadratic Cost\";\n  }\n\n  float computeStateCost(const Eigen::Ref<const output_array> s, int timestep = 0, int* crash_status = nullptr);\n\n  float terminalCost(const Eigen::Ref<const output_array> s);\n\n  float getLipshitzConstantCost()\n  {\n    // Find the spectral radius of the state matrix\n    float rho_Q = fabsf(this->params_.s_coeffs[0]);\n    for (int i = 1; i < DYN_T::OUTPUT_DIM; i++)\n    {\n      rho_Q = fmaxf(rho_Q, fabsf(this->params_.s_coeffs[i]));\n    }\n    return 2 * rho_Q;\n  };\n\n  /**\n   * Device Functions\n   */\n  __device__ float computeStateCost(float* s, int timestep = 0, float* theta_c = nullptr, int* crash_status = nullptr);\n\n  // Custom implementation that does a Nan check.\n  // __device__ float computeRunningCost(float* s, float* u, float* noise, float* std_dev, float lambda, float alpha,\n  // int timestep, float* theta_c,  int* crash_status);\n\n  __device__ float terminalCost(float* s, float* theta_c);\n};\n\n#if __CUDACC__\n#include \"quadratic_cost.cu\"\n#endif\n\ntemplate <class DYN_T, int SIM_TIME_HORIZON>\nclass QuadraticCostTrajectory : public QuadraticCostImpl<QuadraticCostTrajectory<DYN_T, SIM_TIME_HORIZON>, DYN_T,\n                                                         QuadraticCostTrajectoryParams<DYN_T, SIM_TIME_HORIZON>>\n{\npublic:\n  QuadraticCostTrajectory(cudaStream_t stream = nullptr)\n    : QuadraticCostImpl<QuadraticCostTrajectory, DYN_T, QuadraticCostTrajectoryParams<DYN_T, SIM_TIME_HORIZON>>(\n          stream){};\n};\n\ntemplate <class DYN_T>\nclass QuadraticCost : public QuadraticCostImpl<QuadraticCost<DYN_T>, DYN_T, QuadraticCostTrajectoryParams<DYN_T>>\n{\npublic:\n  QuadraticCost(cudaStream_t stream = nullptr)\n    : QuadraticCostImpl<QuadraticCost, DYN_T, QuadraticCostTrajectoryParams<DYN_T>>(stream){};\n};\n\n#endif  // MPPI_COST_FUNCTIONS_QUADRATIC_COST_CUH_\n"
  },
  {
    "path": "include/mppi/cost_functions/quadrotor/quadrotor_map_cost.cu",
    "content": "#include <cnpy.h>\n#include <mppi/utils/file_utils.h>\n#include <mppi/cost_functions/quadrotor/quadrotor_map_cost.cuh>\n#include <mppi/utils/cuda_math_utils.cuh>\n\ntemplate <class CLASS_T, class PARAMS_T>\nQuadrotorMapCostImpl<CLASS_T, PARAMS_T>::QuadrotorMapCostImpl(cudaStream_t stream)\n{\n  tex_helper_ = new TwoDTextureHelper<float>(1, stream);\n  this->bindToStream(stream);\n}\ntemplate <class CLASS_T, class PARAMS_T>\nQuadrotorMapCostImpl<CLASS_T, PARAMS_T>::~QuadrotorMapCostImpl()\n{\n  delete tex_helper_;\n}\ntemplate <class CLASS_T, class PARAMS_T>\nvoid QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::bindToStream(cudaStream_t stream)\n{\n  PARENT_CLASS::bindToStream(stream);\n  tex_helper_->bindToStream(stream);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::freeCudaMem()\n{\n  if (this->GPUMemStatus_)\n  {\n    // HANDLE_ERROR(cudaFreeArray(costmapArray_d_));\n    // HANDLE_ERROR(cudaDestroyTextureObject(costmap_tex_d_));\n    tex_helper_->freeCudaMem();\n  }\n  PARENT_CLASS::freeCudaMem();\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::GPUSetup()\n{\n  PARENT_CLASS* derived = static_cast<PARENT_CLASS*>(this);\n  tex_helper_->GPUSetup();\n  derived->GPUSetup();\n  HANDLE_ERROR(cudaMemcpyAsync(&(this->cost_d_->tex_helper_), &(tex_helper_->ptr_d_), sizeof(TwoDTextureHelper<float>*),\n                               cudaMemcpyHostToDevice, this->stream_));\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::paramsToDevice()\n{\n  if (this->GPUMemStatus_)\n  {\n    // HANDLE_ERROR(cudaMemcpyAsync(&this->cost_d_->params_, &this->params_, sizeof(PARAMS_T), cudaMemcpyHostToDevice,\n    //                              this->stream_));\n    // HANDLE_ERROR(\n    //     cudaMemcpyAsync(&this->cost_d_->width_, &width_, sizeof(float), cudaMemcpyHostToDevice, this->stream_));\n    // HANDLE_ERROR(\n    //     
cudaMemcpyAsync(&this->cost_d_->height_, &height_, sizeof(float), cudaMemcpyHostToDevice, this->stream_));\n    // HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n    tex_helper_->copyToDevice();\n  }\n  PARENT_CLASS::paramsToDevice();\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nfloat QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::computeStateCost(const Eigen::Ref<const output_array> s, int timestep,\n                                                                int* crash_status)\n{\n  // TODO query texture on CPU\n  float cost = 0;\n  cost += computeGateSideCost(s.data());\n  cost += computeHeightCost(s.data());\n  cost += computeHeadingCost(s.data());\n  cost += computeSpeedCost(s.data());\n  cost += computeStabilizingCost(s.data());\n  cost += computeWaypointCost(s.data());\n\n  // Decrease cost if we pass a gate\n  float dist_to_gate = distToWaypoint(s.data(), this->params_.curr_waypoint);\n\n  if (dist_to_gate < this->params_.gate_margin)\n  {\n    // if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 &&\n    //     blockIdx.z == 0)\n    // {\n    //   printf(\"Passing through a gate: state_vec: (%f, %f, %f)\\n\", s[O_IND_CLASS(DYN_T::PARAMS_T, POS_X)],\n    //   s[O_IND_CLASS(typename DYN_T::PARAMS_T, POS_Y), s[O_IND_CLASS(DYN_T::PARAMS_T, POS_Z)]]);\n    // }\n    cost += this->params_.gate_pass_cost;\n  }\n  return cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ float QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::computeStateCost(float* s, int timestep, float* theta_c,\n                                                                           int* crash_status)\n{\n  float cost = 0;\n  float costmap_cost, gate_cost, height_cost, heading_cost, speed_cost, stable_cost;\n  float waypoint_cost;\n  costmap_cost = computeCostmapCost(s);\n  gate_cost = computeGateSideCost(s);\n  height_cost = computeHeightCost(s);\n  heading_cost = computeHeadingCost(s);\n  speed_cost = computeSpeedCost(s);\n  
stable_cost = computeStabilizingCost(s);\n  waypoint_cost = computeWaypointCost(s);\n  if (gate_cost != 0)\n  {\n    *crash_status = 1;\n    if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 &&\n        blockIdx.z == 0)\n    {\n      printf(\"hitting the gate?\\n\");\n    }\n  }\n\n  // if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z ==\n  // 0 && timestep == 1)\n  // {\n  //   // if (isnan(costmap_cost) || isnan(gate_cost) || isnan(height_cost) || isnan(heading_cost) || isnan(speed_cost)\n  //   //     ||\n  //   //     isnan(stable_cost) || isnan(waypoint_cost))\n  //   // {\n  //     printf(\n  //         \"Costs rollout %d: Costmap %5.2f, Gate %5.2f, Height %5.2f,\"\n  //         \" Heading %5.2f, Speed %5.2f, Stabilization %5.2f, Waypoint %5.2f\\n\", timestep,\n  //         costmap_cost, gate_cost, height_cost, heading_cost, speed_cost, stable_cost, waypoint_cost);\n  //   // }\n  //   // printf(\"Costs %d: height - %5.2f, stable - %5.2f prev_height %5.2f, cur_height: %5.2f\\n\", timestep,\n  //   //        height_cost,\n  //   //        stable_cost, this->params_.prev_waypoint.z, this->params_.curr_waypoint.z);\n  // }\n\n  cost += costmap_cost + gate_cost + height_cost + heading_cost + speed_cost + stable_cost;\n\n  // Decrease cost if we pass a gate\n  float dist_to_gate = distToWaypoint(s, this->params_.curr_waypoint);\n\n  if (dist_to_gate < this->params_.gate_margin)\n  {\n    cost += this->params_.gate_pass_cost;\n  }\n  cost += *crash_status * this->params_.crash_coeff;\n  return cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ float QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::distToWaypoint(float* s, float4 waypoint)\n{\n  float dist = sqrt(SQ(s[E_INDEX(OutputIndex, POS_X)] - waypoint.x) + SQ(s[E_INDEX(OutputIndex, POS_Y)] - waypoint.y) +\n                    SQ(s[E_INDEX(OutputIndex, POS_Z)] - waypoint.z));\n\n  
return dist;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::updateWaypoint(float4 new_waypoint)\n{\n  updateWaypoint(new_waypoint.x, new_waypoint.y, new_waypoint.z, new_waypoint.w);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::updateWaypoint(float x, float y, float z, float heading)\n{\n  if (this->params_.updateWaypoint(x, y, z, heading))\n  {\n    paramsToDevice();\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::updateGateBoundaries(float3 left_side, float3 right_side)\n{\n  updateGateBoundaries(left_side.x, left_side.y, left_side.z, right_side.x, right_side.y, right_side.z);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::updateGateBoundaries(std::vector<float> boundaries)\n{\n  if (boundaries.size() < 6)\n  {\n    std::cerr << \"You need \" << 6 - boundaries.size() << \" more floats in the\"\n              << \" call to updateGateBoundaries\" << std::endl;\n    return;\n  }\n  updateGateBoundaries(boundaries[0], boundaries[1], boundaries[2], boundaries[3], boundaries[4], boundaries[5]);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::updateGateBoundaries(float left_x, float left_y, float left_z,\n                                                                   float right_x, float right_y, float right_z)\n{\n  if (this->params_.updateGateBoundaries(left_x, left_y, left_z, right_x, right_y, right_z))\n  {\n    paramsToDevice();\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ float QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::computeStabilizingCost(float* s)\n{\n  float cost = 0;\n  float roll, pitch, yaw;\n  mppi::math::Quat2EulerNWU(&s[6], roll, pitch, yaw);\n\n  float quat_dist_from_level = SQ(roll) + SQ(pitch);\n  cost += this->params_.attitude_coeff * quat_dist_from_level;\n  return 
cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ float QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::computeHeadingCost(float* s)\n{\n  float cost = 0;\n  // float roll, pitch, yaw;\n  // mppi::math::Quat2EulerNWU(&s[6], roll, pitch, yaw);\n  float R[3][3];\n  mppi::math::Quat2DCM(&s[E_INDEX(OutputIndex, QUAT_W)], R);\n  const float& vx = s[E_INDEX(OutputIndex, VEL_X)];\n  const float& vy = s[E_INDEX(OutputIndex, VEL_Y)];\n  const float& vz = s[E_INDEX(OutputIndex, VEL_Z)];\n  float3 w_v = make_float3(R[0][0] * vx + R[0][1] * vy + R[0][2] * vz, R[1][0] * vx + R[1][1] * vy + R[1][2] * vz,\n                           R[2][0] * vx + R[2][1] * vy + R[2][2] * vz);\n  float yaw = atan2f(w_v.y, w_v.x);\n\n  // Calculate heading to gate\n  float wx = this->params_.curr_waypoint.x - s[E_INDEX(OutputIndex, POS_X)];\n  float wy = this->params_.curr_waypoint.y - s[E_INDEX(OutputIndex, POS_Y)];\n  float w_heading = atan2f(wy, wx);\n\n  float dist_to_gate = distToWaypoint(s, this->params_.curr_waypoint);\n  // Far away from the gate, we want to be pointing at the gate\n  if (dist_to_gate > this->params_.gate_margin)\n  {\n    cost += this->params_.heading_coeff *\n            powf(fabsf(angle_utils::shortestAngularDistance(w_heading, yaw)), this->params_.heading_power);\n  }\n  return cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ float QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::computeSpeedCost(float* s)\n{\n  float cost = 0;\n  float speed = sqrt(s[E_INDEX(OutputIndex, VEL_X)] * s[E_INDEX(OutputIndex, VEL_X)] +\n                     s[E_INDEX(OutputIndex, VEL_Y)] * s[E_INDEX(OutputIndex, VEL_Y)]);\n  float desired_speed = this->params_.desired_speed;\n  // if (this->params_.curr_waypoint == this->params_.end_waypoint)\n  // {\n  //   desired_speed = distToWaypoint(s, this->params_.curr_waypoint);\n  // }\n  cost = this->params_.speed_coeff * SQ(speed - desired_speed);\n  return cost;\n}\n\ntemplate <class CLASS_T, class 
PARAMS_T>\n__host__ __device__ float QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::computeWaypointCost(float* s)\n{\n  float cost = 0;\n  float dist_to_gate = distToWaypoint(s, this->params_.curr_waypoint);\n  cost = this->params_.dist_to_waypoint_coeff * SQ(dist_to_gate);\n  return cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ float QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::computeGateSideCost(float* s)\n{\n  float cost = 0;\n  float2 curr_left = make_float2(this->params_.curr_gate_left.x, this->params_.curr_gate_left.y);\n  float2 curr_right = make_float2(this->params_.curr_gate_right.x, this->params_.curr_gate_right.y);\n  // Calculate the side border cost\n  float dist_to_left_side =\n      sqrtf(SQ(s[E_INDEX(OutputIndex, POS_X)] - curr_left.x) + SQ(s[E_INDEX(OutputIndex, POS_Y)] - curr_left.y));\n  float dist_to_right_side =\n      sqrtf(SQ(s[E_INDEX(OutputIndex, POS_X)] - curr_right.x) + SQ(s[E_INDEX(OutputIndex, POS_Y)] - curr_right.y));\n\n  float prev_dist_to_left_side = sqrtf(SQ(s[E_INDEX(OutputIndex, POS_X)] - this->params_.prev_gate_left.x) +\n                                       SQ(s[E_INDEX(OutputIndex, POS_Y)] - this->params_.prev_gate_left.y));\n  float prev_dist_to_right_side = sqrtf(SQ(s[E_INDEX(OutputIndex, POS_X)] - this->params_.prev_gate_right.x) +\n                                        SQ(s[E_INDEX(OutputIndex, POS_Y)] - this->params_.prev_gate_right.y));\n\n  float2 gate_vec = curr_left - curr_right;\n  float2 state_vec_right = make_float2(s[E_INDEX(OutputIndex, POS_X)], s[E_INDEX(OutputIndex, POS_Y)]) - curr_right;\n  float2 state_vec_left = make_float2(s[E_INDEX(OutputIndex, POS_X)], s[E_INDEX(OutputIndex, POS_Y)]) - curr_left;\n  const float perp_dist = cross(state_vec_right, gate_vec);\n  const float comp_state_along_gate_right = dot(state_vec_right, gate_vec) / dot(gate_vec, gate_vec);\n  const float threshold = 0.5;\n  const float comp_state_along_gate_left = dot(state_vec_left, -gate_vec) / dot(gate_vec, 
gate_vec);\n  const float outside_gate = fmaxf(comp_state_along_gate_left, comp_state_along_gate_right);\n  // Find the side closest to the current state\n  // const float closest_side_dist =\n  //     fminf(dist_to_left_side, fminf(dist_to_right_side, fminf(prev_dist_to_left_side, prev_dist_to_right_side)));\n  // if (fabsf(perp_dist) < this->params_.min_dist_to_gate_side &&\n  //     fmaxf(comp_state_along_gate_left, comp_state_along_gate_right) > 1.0f)\n  // {  // Within perpendicular distance of min_dist and outside of gate\n  //   cost += this->params_.crash_coeff * fmaxf(comp_state_along_gate_left, comp_state_along_gate_right);\n  if (fabsf(perp_dist) < this->params_.min_dist_to_gate_side &&\n      ((comp_state_along_gate_right < 0.0f && comp_state_along_gate_right >= 0.0f - threshold) ||\n       (comp_state_along_gate_right > 1.0f && comp_state_along_gate_right <= 1.0f + threshold)))\n  {\n    cost += this->params_.crash_coeff * fabsf(comp_state_along_gate_right);\n    if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 &&\n        blockIdx.z == 0)\n    {\n      printf(\"Hitting a gate: state_vec: (%f, %f), gate_vec: (%f, %f), projection onto gate: %f\\n\", state_vec_right.x,\n             state_vec_right.y, gate_vec.x, gate_vec.y, comp_state_along_gate_right);\n    }\n  }\n  // if (closest_side_dist < this->params_.min_dist_to_gate_side)\n  // {\n  //   cost += this->params_.crash_coeff * (this->params_.min_dist_to_gate_side - closest_side_dist);\n  //   if (fmaxf(comp_state_along_gate_left, comp_state_along_gate_right) > 1.0f)\n  //   {\n  //     cost += this->params_.crash_coeff * fmaxf(comp_state_along_gate_left, comp_state_along_gate_right);\n  //     if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 &&\n  //     blockIdx.z == 0\n  //         )\n  //     {\n  //       printf(\"Hitting a gate\\n\");\n  //     }\n  //   }\n  // }\n  return cost;\n}\n\ntemplate 
<class CLASS_T, class PARAMS_T>\n__host__ __device__ float QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::computeHeightCost(float* s)\n{\n  // Calculate height cost\n  float cost = 0;\n  // if (s[E_INDEX(OutputIndex, POS_Z)] < 1) {\n  //   return this->params_.crash_coeff;\n  // }\n  float d1 = sqrtf(SQ(s[E_INDEX(OutputIndex, POS_X)] - this->params_.prev_waypoint.x) +\n                   SQ(s[E_INDEX(OutputIndex, POS_Y)] - this->params_.prev_waypoint.y));\n  float d2 = sqrtf(SQ(s[E_INDEX(OutputIndex, POS_X)] - this->params_.curr_waypoint.x) +\n                   SQ(s[E_INDEX(OutputIndex, POS_Y)] - this->params_.curr_waypoint.y));\n\n  float w1 = d1 / (d1 + d2 + 0.001);\n  float w2 = d2 / (d1 + d2 + 0.001);\n  float interpolated_height = (1.0 - w1) * this->params_.prev_waypoint.z + (1.0 - w2) * this->params_.curr_waypoint.z;\n\n  float height_diff = SQ(fabsf(s[E_INDEX(OutputIndex, POS_Z)] - interpolated_height));\n  if (height_diff < 0)\n  {\n    cost += this->params_.crash_coeff * (1 - height_diff);\n  }\n  else\n  {\n    cost += this->params_.height_coeff * height_diff;\n  }\n  // cost += this->params_.height_coeff * height_diff;\n  if (height_diff > this->params_.gate_width)\n  {\n    cost += 400;\n  }\n  return cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ float QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::computeCostmapCost(float* s)\n{\n  float cost = 0;\n  if (!this->tex_helper_->checkTextureUse(0))\n  {\n    return cost;\n  }\n  float3 query_point =\n      make_float3(s[E_INDEX(OutputIndex, POS_X)], s[E_INDEX(OutputIndex, POS_Y)], s[E_INDEX(OutputIndex, POS_Z)]);\n  float3 tex_coords;\n  this->tex_helper_->worldPoseToTexCoord(0, query_point, tex_coords);\n  if (tex_coords.x < 0.0f || tex_coords.x > 1.0f || tex_coords.y < 0.0f || tex_coords.y > 1.0f)\n  {  // The vehicle is not in the map anymore\n    cost += this->params_.crash_coeff;\n    if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 &&\n        
blockIdx.z == 0)\n    {\n      printf(\"left the map\\n\");\n    }\n  }\n  float track_cost = this->tex_helper_->queryTexture(0, tex_coords);\n\n  // Calculate cost based on distance from centerline of the track\n  if (track_cost > this->params_.track_slop)\n  {\n    cost += this->params_.track_coeff * track_cost;\n  }\n\n  if (track_cost > this->params_.track_boundary_cost)\n  {\n    // the cost at this point on the costmap indicates no longer being on the track\n    cost += this->params_.crash_coeff;\n  }\n\n  return cost;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nfloat QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::terminalCost(const Eigen::Ref<const output_array> s)\n{\n  return 0;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ float QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::terminalCost(float* s, float* theta_c)\n{\n  return 0;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nstd::vector<float4> QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::loadTrackData(std::string map_path)\n{\n  // check if file exists\n  if (!fileExists(map_path))\n  {\n    std::cerr << \"ERROR: map path invalid, \" << map_path << std::endl;\n    return std::vector<float4>();\n  }\n\n  // load the npz file\n  cnpy::npz_t map_dict = cnpy::npz_load(map_path);\n  float x_min, x_max, y_min, y_max, ppm;\n  float* xBounds = map_dict[\"xBounds\"].data<float>();\n  float* yBounds = map_dict[\"yBounds\"].data<float>();\n  float* pixelsPerMeter = map_dict[\"pixelsPerMeter\"].data<float>();\n  x_min = xBounds[0];\n  x_max = xBounds[1];\n  y_min = yBounds[0];\n  y_max = yBounds[1];\n  ppm = pixelsPerMeter[0];\n\n  int width = int((x_max - x_min) * ppm);\n  int height = int((y_max - y_min) * ppm);\n\n  if (!changeCostmapSize(width, height))\n  {\n    std::cerr << \"ERROR: load track has invalid sizes\" << std::endl;\n    return std::vector<float4>();\n  }\n\n  float* channel0 = map_dict[\"channel0\"].data<float>();\n  float* channel1 = map_dict[\"channel1\"].data<float>();\n  float* channel2 = 
map_dict[\"channel2\"].data<float>();\n  float* channel3 = map_dict[\"channel3\"].data<float>();\n\n  // copy the track data into CPU side storage\n  for (int i = 0; i < width_ * height_; i++)\n  {\n    // std::cout << i << \" = \" << channel0[i] << \", \" << channel1[i] << \", \" << channel2[i] << \", \" << channel3[i] <<\n    // std::endl;\n    track_costs_[i].x = channel0[i];\n    track_costs_[i].y = channel1[i];\n    track_costs_[i].z = channel2[i];\n    track_costs_[i].w = channel3[i];\n  }\n\n  Eigen::Matrix3f R;\n  Eigen::Array3f trs;\n\n  // Save the scaling and offset\n  R << 1. / (x_max - x_min), 0, 0, 0, 1. / (y_max - y_min), 0, 0, 0, 1;\n  trs << -x_min / (x_max - x_min), -y_min / (y_max - y_min), 1;\n\n  updateTransform(R, trs);\n  costmapToTexture();\n\n  return track_costs_;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nbool QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::changeCostmapSize(int width, int height)\n{\n  // TODO set flag at top that indicates memory allocation changes\n  if (height < 0 && width < 0)\n  {\n    std::cerr << \"ERROR: cannot resize costmap to size less than 1\" << std::endl;\n    return false;\n  }\n  if (height != height_ || width != width_)\n  {\n    track_costs_.resize(width * height);\n\n    // Allocate memory for the cuda array which is bound the costmap_tex_\n    // has been allocated in the past, must be freed\n    if (height_ > 0 && width_ > 0)\n    {\n      HANDLE_ERROR(cudaFreeArray(costmapArray_d_));\n    }\n    // 4 floats of size 32 bits\n    channelDesc_ = cudaCreateChannelDesc(32, 32, 32, 32, cudaChannelFormatKindFloat);\n    HANDLE_ERROR(cudaMallocArray(&costmapArray_d_, &channelDesc_, width, height));\n\n    // set all of the elements in the array to be zero\n    std::vector<float4> zero_array(width_ * height_);\n    zero_array.resize(width * height, make_float4(0, 0, 0, 0));\n    HANDLE_ERROR(cudaMemcpyToArray(costmapArray_d_, 0, 0, zero_array.data(), width * height * sizeof(float4),\n                           
        cudaMemcpyHostToDevice));\n  }\n\n  width_ = width;\n  height_ = height;\n  return true;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::updateTransform(const Eigen::Ref<const Eigen::Matrix3f>& m,\n                                                              const Eigen::Ref<const Eigen::Array3f>& trs)\n{\n  this->params_.r_c1.x = m(0, 0);\n  this->params_.r_c1.y = m(1, 0);\n  this->params_.r_c1.z = m(2, 0);\n  this->params_.r_c2.x = m(0, 1);\n  this->params_.r_c2.y = m(1, 1);\n  this->params_.r_c2.z = m(2, 1);\n  this->params_.trs.x = trs(0);\n  this->params_.trs.y = trs(1);\n  this->params_.trs.z = trs(2);\n  // Move the updated parameters to gpu memory\n  if (this->GPUMemStatus_)\n  {\n    paramsToDevice();\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::costmapToTexture()\n{\n  if (width_ < 0 || height_ < 0)\n  {\n    std::cerr << \"ERROR: cannot allocate texture with zero size\" << std::endl;\n    return;\n  }\n\n  // transfer CPU version of costmap to GPU\n  float4* costmap_ptr = track_costs_.data();\n  HANDLE_ERROR(\n      cudaMemcpyToArray(costmapArray_d_, 0, 0, costmap_ptr, width_ * height_ * sizeof(float4), cudaMemcpyHostToDevice));\n  cudaStreamSynchronize(this->stream_);\n\n  // Specify texture\n  struct cudaResourceDesc resDesc;\n  memset(&resDesc, 0, sizeof(resDesc));\n  resDesc.resType = cudaResourceTypeArray;\n  resDesc.res.array.array = costmapArray_d_;\n\n  // Specify texture object parameters\n  struct cudaTextureDesc texDesc;\n  memset(&texDesc, 0, sizeof(texDesc));\n  texDesc.addressMode[0] = cudaAddressModeClamp;\n  texDesc.addressMode[1] = cudaAddressModeClamp;\n  texDesc.filterMode = cudaFilterModePoint;\n  texDesc.readMode = cudaReadModeElementType;\n  texDesc.normalizedCoords = 1;\n\n  // Destroy current texture and create new texture object\n  HANDLE_ERROR(cudaDestroyTextureObject(costmap_tex_d_));\n  
HANDLE_ERROR(cudaCreateTextureObject(&costmap_tex_d_, &resDesc, &texDesc, NULL));\n\n  // copy over pointers setup up on CPU code to GPU\n  HANDLE_ERROR(cudaMemcpyAsync(&this->cost_d_->costmapArray_d_, &costmapArray_d_, sizeof(cudaArray*),\n                               cudaMemcpyHostToDevice, this->stream_));\n  HANDLE_ERROR(cudaMemcpyAsync(&this->cost_d_->costmap_tex_d_, &costmap_tex_d_, sizeof(cudaTextureObject_t),\n                               cudaMemcpyHostToDevice, this->stream_));\n  cudaStreamSynchronize(this->stream_);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ float4 QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::queryTexture(float x, float y) const\n{\n  return tex2D<float4>(costmap_tex_d_, x, y);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ float4 QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::queryTextureTransformed(float x, float y)\n{\n  float u, v, w;\n  coorTransform(x, y, &u, &v, &w);\n  return queryTexture(u / w, v / w);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ void QuadrotorMapCostImpl<CLASS_T, PARAMS_T>::coorTransform(float x, float y, float* u, float* v,\n                                                                                float* w)\n{\n  ////Compute a projective transform of (x, y, 0, 1)\n  // converts to the texture [0-1] coordinate system\n  u[0] = this->params_.r_c1.x * x + this->params_.r_c2.x * y + this->params_.trs.x;\n  v[0] = this->params_.r_c1.y * x + this->params_.r_c2.y * y + this->params_.trs.y;\n  w[0] = this->params_.r_c1.z * x + this->params_.r_c2.z * y + this->params_.trs.z;\n}\n"
  },
  {
    "path": "include/mppi/cost_functions/quadrotor/quadrotor_map_cost.cuh",
    "content": "/*\n * Created on Wed Jul 22 2020 by Bogdan\n */\n#ifndef MPPI_COST_FUNCTIONS_QUADROTOR_MAP_COST_CUH_\n#define MPPI_COST_FUNCTIONS_QUADROTOR_MAP_COST_CUH_\n\n#include <mppi/cost_functions/cost.cuh>\n#include <mppi/dynamics/quadrotor/quadrotor_dynamics.cuh>\n#include <mppi/utils/math_utils.h>\n#include <mppi/utils/texture_helpers/two_d_texture_helper.cuh>\n\n#include <string>\n\nstruct QuadrotorMapCostParams : public CostParams<4>\n{\n  float3 r_c1;\n  float3 r_c2;\n  float3 trs;\n\n  float attitude_coeff = 10;\n  float crash_coeff = 1000;\n  float dist_to_waypoint_coeff = 0.0;\n  float heading_coeff = 5;\n  float heading_power = 1.0;  // power to take the difference in headings to\n  float height_coeff = 5;\n  float track_coeff = 10;\n  float speed_coeff = 5;\n  float track_slop = 0.0;\n  float gate_pass_cost = -150;\n\n  float4 curr_waypoint;\n  float4 prev_waypoint;\n  float3 curr_gate_left;\n  float3 curr_gate_right;\n  float3 prev_gate_left;\n  float3 prev_gate_right;\n  float4 end_waypoint;\n\n  // if the costmap cost is above this, we are no longer on the track\n  float desired_speed = 5;            // [m/s]\n  float gate_margin = 0.5;            // [m]\n  float min_dist_to_gate_side = 0.5;  // [m] TODO find correct value for this\n  float track_boundary_cost = 2.5;\n  float gate_width = 2.15;  // [m]\n\n  QuadrotorMapCostParams()\n  {\n    control_cost_coeff[0] = 1;  // roll\n    control_cost_coeff[1] = 1;  // pitch\n    control_cost_coeff[2] = 1;  // yaw\n    control_cost_coeff[3] = 1;  // throttle\n    curr_waypoint = make_float4(0, 0, 0, 0);\n    prev_waypoint = make_float4(0, 0, 0, 0);\n\n    curr_gate_left = make_float3(0, 0, 0);\n    curr_gate_right = make_float3(0, 0, 0);\n    prev_gate_left = make_float3(0, 0, 0);\n    prev_gate_right = make_float3(0, 0, 0);\n    end_waypoint = make_float4(NAN, NAN, NAN, NAN);\n  }\n\n  bool updateWaypoint(float x, float y, float z, float heading = 0)\n  {\n    bool changed = false;\n    if 
(curr_waypoint.x != x || curr_waypoint.y != y || curr_waypoint.z != z || curr_waypoint.w != heading)\n    {\n      prev_waypoint = curr_waypoint;\n      curr_waypoint = make_float4(x, y, z, heading);\n      float3 curr_left = make_float3(x + cosf(heading) * gate_width, y + sinf(heading) * gate_width, z);\n      float3 curr_right = make_float3(x - cosf(heading) * gate_width, y - sinf(heading) * gate_width, z);\n      updateGateBoundaries(curr_left.x, curr_left.y, curr_left.z, curr_right.x, curr_right.y, curr_right.z);\n      changed = true;\n    }\n    return changed;\n  }\n\n  bool updateGateBoundaries(float left_x, float left_y, float left_z, float right_x, float right_y, float right_z)\n  {\n    bool changed = false;\n    if (curr_gate_left.x != left_x || curr_gate_left.y != left_y || curr_gate_left.z != left_z ||\n        curr_gate_right.x != right_x || curr_gate_right.y != right_y || curr_gate_right.z != right_z)\n    {\n      prev_gate_left = curr_gate_left;\n      prev_gate_right = curr_gate_right;\n      curr_gate_left = make_float3(left_x, left_y, left_z);\n      curr_gate_right = make_float3(right_x, right_y, right_z);\n      changed = true;\n    }\n    return changed;\n  }\n};\n\ntemplate <class CLASS_T, class PARAMS_T = QuadrotorMapCostParams>\nclass QuadrotorMapCostImpl : public Cost<CLASS_T, PARAMS_T, QuadrotorDynamicsParams>\n{\npublic:\n  // I think these typedefs are needed because this class is itself templated?\n  typedef Cost<CLASS_T, PARAMS_T, QuadrotorDynamicsParams> PARENT_CLASS;\n  using output_array = typename PARENT_CLASS::output_array;\n  using control_array = typename PARENT_CLASS::control_array;\n  using OutputIndex = typename PARENT_CLASS::OutputIndex;\n  using ControlIndex = typename PARENT_CLASS::ControlIndex;\n  using DYN_P = QuadrotorDynamicsParams;\n\n  QuadrotorMapCostImpl(cudaStream_t stream = 0);\n\n  ~QuadrotorMapCostImpl();\n\n  void bindToStream(cudaStream_t stream);\n\n  void GPUSetup();\n\n  void freeCudaMem();\n\n  void 
paramsToDevice();\n\n  std::string getCostFunctionName() const override\n  {\n    return std::string(\"Quadrotor Map Cost\");\n  }\n\n  float computeStateCost(const Eigen::Ref<const output_array> s, int timestep, int* crash_status);\n\n  __device__ float computeStateCost(float* s, int timestep, float* theta_c, int* crash_status);\n\n  float terminalCost(const Eigen::Ref<const output_array> s);\n\n  __device__ float terminalCost(float* s, float* theta_c = nullptr);\n\n  __host__ __device__ float computeGateSideCost(float* s);\n\n  __host__ __device__ float computeHeadingCost(float* s);\n\n  __host__ __device__ float computeHeightCost(float* s);\n\n  __host__ __device__ float computeSpeedCost(float* s);\n\n  __host__ __device__ float computeStabilizingCost(float* s);\n\n  __host__ __device__ float computeWaypointCost(float* s);\n\n  __host__ __device__ float distToWaypoint(float* s, float4 waypoint);\n\n  void updateWaypoint(float4 new_waypoint);\n  void updateWaypoint(float x, float y, float z, float heading = 0);\n\n  void updateGateBoundaries(float3 left_side, float3 right_side);\n  void updateGateBoundaries(std::vector<float> boundaries);\n  void updateGateBoundaries(float left_x, float left_y, float left_z, float right_x, float right_y, float right_z);\n\n  /** =================== Cost Map Related Functions ================== **/\n  __device__ float computeCostmapCost(float* s);\n\n  std::vector<float4> loadTrackData(std::string map_path);\n\n  __device__ float4 queryTexture(float x, float y) const;\n\n  /**\n   * alters the costmap size in CPU storage and GPU texture\n   * @param width\n   * @param height\n   * @return\n   */\n  bool changeCostmapSize(int width, int height);\n\n  /**\n   * @brief Binds the member variable costmap to a CUDA texture.\n   */\n  void costmapToTexture();\n\n  /**\n   * @brief Updates the current costmap coordinate transform.\n   * @param h Matrix representing a transform from world to (offset) costmap coordinates.\n   * @param trs 
Array representing the offset.\n   */\n  void updateTransform(const Eigen::Ref<const Eigen::Matrix3f>& m, const Eigen::Ref<const Eigen::Array3f>& trs);\n\n  /**\n   * @brief Compute a coordinate transform going from world to costmap coordinates.\n   */\n  __host__ __device__ void coorTransform(float x, float y, float* u, float* v, float* w);\n\n  /**\n   * Queries the texture using coorTransform beforehand\n   */\n  __device__ float4 queryTextureTransformed(float x, float y);\n\n  TwoDTextureHelper<float>* tex_helper_ = nullptr;\n\nprotected:\n  std::string map_path_;\n\n  // Costmap Related Variables\n  std::vector<float4> track_costs_;\n  int width_ = -1;\n  int height_ = -1;\n\n  cudaArray* costmapArray_d_;          ///< Cuda array for texture binding.\n  cudaChannelFormatDesc channelDesc_;  ///< Cuda texture channel description.\n  cudaTextureObject_t costmap_tex_d_;  ///< Cuda texture object.\n};\n\nclass QuadrotorMapCost : public QuadrotorMapCostImpl<QuadrotorMapCost>\n{\npublic:\n  QuadrotorMapCost(cudaStream_t stream = 0) : QuadrotorMapCostImpl<QuadrotorMapCost>(stream){};\n};\n\n#if __CUDACC__\n#include \"quadrotor_map_cost.cu\"\n#endif\n#endif  // MPPI_COST_FUNCTIONS_QUADROTOR_MAP_COST_CUH_\n"
  },
  {
    "path": "include/mppi/cost_functions/quadrotor/quadrotor_quadratic_cost.cu",
    "content": "#include <mppi/cost_functions/quadrotor/quadrotor_quadratic_cost.cuh>\n\nQuadrotorQuadraticCost::QuadrotorQuadraticCost(cudaStream_t stream)\n{\n  bindToStream(stream);\n}\n\n/**\n * Host Functions\n */\nfloat QuadrotorQuadraticCost::computeStateCost(const Eigen::Ref<const output_array> s, int timestep, int* crash_status)\n{\n  Eigen::Vector3f x, v, w;\n  // Eigen::Vector4f q;\n\n  Eigen::Map<const Eigen::Vector3f> x_g(this->params_.x_goal());\n  Eigen::Map<const Eigen::Vector3f> v_g(this->params_.v_goal());\n  Eigen::Map<const Eigen::Vector3f> w_g(this->params_.w_goal());\n\n  x = s.block<3, 1>(E_INDEX(OutputIndex, POS_X), 0);\n  v = s.block<3, 1>(E_INDEX(OutputIndex, VEL_X), 0);\n  w = s.block<3, 1>(E_INDEX(OutputIndex, ANG_VEL_X), 0);\n\n  // Quaternion/Angle Costs\n  Eigen::Quaternionf q(s[6], s[7], s[8], s[9]);\n  Eigen::Quaternionf q_g(this->params_.q_goal()[0], this->params_.q_goal()[1], this->params_.q_goal()[2],\n                         this->params_.q_goal()[3]);\n  Eigen::Quaternionf q_diff;\n  mppi::math::QuatSubtract(q, q_g, q_diff);\n  Eigen::VectorXf rot_diff;\n  Eigen::ArrayXf rot_coeff;\n  if (this->params_.use_euler)\n  {\n    float r_diff, p_diff, y_diff;\n    mppi::math::Quat2EulerNWU(q_diff, r_diff, p_diff, y_diff);\n\n    Eigen::Vector3f angle_diff;\n    angle_diff << r_diff, p_diff, y_diff;\n    Eigen::Array3f angle_coeff;\n    angle_coeff << this->params_.roll_coeff, this->params_.pitch_coeff, this->params_.yaw_coeff;\n    rot_diff = angle_diff;\n    rot_coeff = angle_coeff;\n  }\n  else\n  {\n    Eigen::Vector4f q_vec;\n    q_vec << q_diff.w(), q_diff.x(), q_diff.y(), q_diff.z();\n    Eigen::Array4f q_coeff;\n    q_coeff << this->params_.q_coeff, this->params_.q_coeff, this->params_.q_coeff, this->params_.q_coeff;\n    rot_diff = q_vec;\n    rot_coeff = q_coeff;\n  }\n\n  Eigen::Vector3f x_cost = this->params_.x_coeff * (x - x_g).array().square();\n  Eigen::Vector3f v_cost = this->params_.v_coeff * (v - 
v_g).array().square();\n  Eigen::Vector3f q_cost = rot_coeff * rot_diff.array().square();\n  Eigen::Vector3f w_cost = this->params_.w_coeff * (w - w_g).array().square();\n\n  return x_cost.sum() + v_cost.sum() + q_cost.sum() + w_cost.sum();\n}\n\nfloat QuadrotorQuadraticCost::terminalCost(const Eigen::Ref<const output_array> s)\n{\n  return this->params_.terminal_cost_coeff * computeStateCost(s);\n}\n\n/**\n * Device Functions\n */\n__device__ float QuadrotorQuadraticCost::computeStateCost(float* s, int timestep, float* theta_c, int* crash_status)\n{\n  float s_diff[OUTPUT_DIM];\n  int i;\n  float sum = 0;\n\n  for (i = 0; i < OUTPUT_DIM; i++)\n  {\n    s_diff[i] = powf(s[i] - this->params_.s_goal[i], 2);\n  }\n  float q_diff[4];\n  mppi::math::QuatSubtract(s + 6, this->params_.s_goal + 6, q_diff);\n\n  for (i = 0; i < 3; i++)\n  {\n    s_diff[i] *= this->params_.x_coeff;\n  }\n\n  for (i = 3; i < 6; i++)\n  {\n    s_diff[i] *= this->params_.v_coeff;\n  }\n\n  if (!this->params_.use_euler)\n  {\n    for (i = 6; i < 10; i++)\n    {\n      s_diff[i] = this->params_.q_coeff * q_diff[i - 6];\n    }\n  }\n  if (this->params_.use_euler)\n  {\n    // Zero out quaternion portion for later\n    for (i = 6; i < 10; i++)\n    {\n      s_diff[i] = 0;\n    }\n    // Get Euler Angles\n    float r_diff, p_diff, y_diff;\n    mppi::math::Quat2EulerNWU(q_diff, r_diff, p_diff, y_diff);\n    sum += this->params_.roll_coeff * SQ(r_diff);\n    sum += this->params_.pitch_coeff * SQ(p_diff);\n    sum += this->params_.yaw_coeff * SQ(y_diff);\n  }\n\n  for (i = 10; i < 13; i++)\n  {\n    s_diff[i] *= this->params_.w_coeff;\n  }\n\n  for (i = 0; i < OUTPUT_DIM; i++)\n  {\n    sum += s_diff[i];\n  }\n\n  // do a final nan check\n  return sum * (1 - isnan(sum)) + isnan(sum) * MAX_COST_VALUE;\n}\n\n__device__ float QuadrotorQuadraticCost::terminalCost(float* s, float* theta_c)\n{\n  float cost = this->params_.terminal_cost_coeff * computeStateCost(s);\n  return cost * (1 - isnan(cost)) + 
isnan(cost) * MAX_COST_VALUE;\n}\n"
  },
  {
    "path": "include/mppi/cost_functions/quadrotor/quadrotor_quadratic_cost.cuh",
    "content": "/*\n * Created on Thu Jun 11 2020 by Bogdan\n */\n#ifndef MPPI_COST_FUNCTIONS_QUADROTOR_QUADRATIC_COST_CUH_\n#define MPPI_COST_FUNCTIONS_QUADROTOR_QUADRATIC_COST_CUH_\n\n#include <mppi/cost_functions/cost.cuh>\n#include <mppi/dynamics/quadrotor/quadrotor_dynamics.cuh>\n#include <mppi/utils/math_utils.h>\nstruct QuadrotorQuadraticCostParams : public CostParams<4>\n{\n  /**\n   * State for this class is defined as follows:\n   *    x - position in 3D space (x, y, z) - meters\n   *    v - velocity in 3D space (v_x, v_y_ v_z) - meters/sec\n   *    q - quaternion (q_w, q_x, q_y, q_z)\n   *    w - angular velocities (roll_dot, pitch_dot, yaw_dot) - rad/sec\n   *\n   * Coordinate Frame is NWU\n   */\n  float s_goal[13] = { 0, 0, 0,     // x\n                       0, 0, 0,     // v\n                       1, 0, 0, 0,  // q\n                       0, 0, 0 };   // w\n\n  float* x_goal()\n  {\n    return &s_goal[0];\n  }\n  float* v_goal()\n  {\n    return &s_goal[3];\n  }\n  float* q_goal()\n  {\n    return &s_goal[6];\n  }\n  float* w_goal()\n  {\n    return &s_goal[10];\n  }\n\n  float x_coeff = 1.0;\n  float v_coeff = 1.0;\n  // Euler Angle or Quaternion scoring\n  bool use_euler = true;\n  float q_coeff = 1.0;  // Quaternion Modifier\n  float roll_coeff = 1.0;\n  float pitch_coeff = 1.0;\n  float yaw_coeff = 1.0;\n\n  float w_coeff = 1.0;\n  float terminal_cost_coeff = 0;\n\n  QuadrotorQuadraticCostParams()\n  {\n    control_cost_coeff[0] = 2.0;\n    control_cost_coeff[1] = 2.0;\n    control_cost_coeff[2] = 2.0;\n    control_cost_coeff[3] = 2.0;\n  }\n\n  Eigen::Matrix<float, 13, 1> getDesiredState()\n  {\n    Eigen::Matrix<float, 13, 1> s;\n    s << s_goal[0], s_goal[1], s_goal[2], s_goal[3], s_goal[4], s_goal[5], s_goal[6], s_goal[7], s_goal[8], s_goal[9],\n        s_goal[10], s_goal[11], s_goal[12];\n    return s;\n  }\n};\n\nclass QuadrotorQuadraticCost\n  : public Cost<QuadrotorQuadraticCost, QuadrotorQuadraticCostParams, 
QuadrotorDynamicsParams>\n{\n  /**\n   * State for this class is defined as follows:\n   *    x - position in 3D space (x, y, z) - meters\n   *    v - velocity in 3D space (v_x, v_y_ v_z) - meters/sec\n   *    q - quaternion (q_w, q_x, q_y, q_z)\n   *    w - angular velocities (roll_dot, pitch_dot, yaw_dot) - rad/sec\n   *\n   * Coordinate Frame is NWU\n   *\n   * Control:\n   *    roll_rate  - rad/sec\n   *    pitch_rate - rad/sec\n   *    yaw_rate   - rad/sec\n   *    thrust     - Newtons\n   */\n\npublic:\n  QuadrotorQuadraticCost(cudaStream_t stream = nullptr);\n  static constexpr float MAX_COST_VALUE = 1e16;\n\n  /**\n   * Host Functions\n   */\n  float computeStateCost(const Eigen::Ref<const output_array> s, int timestep = 0, int* crash_status = nullptr);\n\n  float terminalCost(const Eigen::Ref<const output_array> s);\n\n  /**\n   * Device Functions\n   */\n  __device__ float computeStateCost(float* s, int timestep = 0, float* theta_c = nullptr, int* crash_status = nullptr);\n\n  __device__ float terminalCost(float* s, float* theta_c = nullptr);\n};\n\n#if __CUDACC__\n#include \"quadrotor_quadratic_cost.cu\"\n#endif\n\n#endif  // MPPI_COST_FUNCTIONS_QUADROTOR_QUADRATIC_COSTS_CUH_\n"
  },
  {
    "path": "include/mppi/ddp/boxqp.h",
    "content": "#ifndef TRAJOPT_BOXQP_HPP\n#define TRAJOPT_BOXQP_HPP\n\n#include \"util.h\"\n#include <Eigen/Cholesky>\n#include <Eigen/Dense>\n#include <cmath>\n#include <tuple>\n\nnamespace util\n{\ntemplate <typename T>\nstruct BoxQPOptions\n{\n  int max_iter;\n  T min_grad_norm;\n  T min_rel_improvement;\n  T step_dec;\n  T min_step;\n  T armijo;\n  bool verbose;\n\n  BoxQPOptions(int max_iter, T min_grad_norm, T min_rel_improvement, T step_dec, T min_step, T armijo, bool verbose)\n    : max_iter(max_iter)\n    , min_grad_norm(min_grad_norm)\n    , min_rel_improvement(min_rel_improvement)\n    , step_dec(step_dec)\n    , min_step(min_step)\n    , armijo(armijo)\n    , verbose(verbose)\n  {\n  }\n\n  BoxQPOptions()\n    : BoxQPOptions(100, static_cast<T>(1.0e-8), static_cast<T>(1.0e-8), static_cast<T>(0.6), static_cast<T>(1.0e-22),\n                   static_cast<T>(0.1), false)\n  {\n  }\n};\n\ntemplate <typename T, int N>\nstruct BoxQPResult\n{\n  Eigen::Matrix<T, N, 1> solution;\n  Eigen::LLT<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> free_chol;\n  Eigen::Matrix<bool, N, 1> free_indices;\n  int result_code;\n\n  BoxQPResult(const Eigen::Ref<const Eigen::Matrix<T, N, 1>>& solution,\n              const Eigen::LLT<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>>& free_chol,\n              const Eigen::Ref<const Eigen::Matrix<bool, N, 1>>& free_indices, int result_code)\n    : solution(solution), free_chol(free_chol), free_indices(free_indices), result_code(result_code)\n  {\n  }\n};\n\ntemplate <typename T, int N>\nclass BoxQP\n{\npublic:\n  using Scalar = T;\n  using Vector = Eigen::Matrix<Scalar, N, 1>;\n  using SquareMatrix = Eigen::Matrix<Scalar, N, N>;\n  using Hessian = SquareMatrix;\n  using Gradient = Vector;\n  using Solution = Vector;\n  using FreeIndices = Eigen::Matrix<bool, N, 1>;\n  using Result = BoxQPResult<T, N>;\n\n  static const int HESSIAN_NOT_POSITIVE_DEFINITE = -1;\n  static const int NO_DESCENT_DIRECTION = 0;\n  static const int 
MAX_ITERATIONS_EXCEEDED = 1;\n  static const int MAX_LINESEARCH_ITERATIONS_EXCEEDED = 2;\n  static const int NO_BOUNDS = 3;\n  static const int IMPROVEMENT_TOO_SMALL = 4;\n  static const int GRADIENT_NORM_TOO_SMALL = 5;\n  static const int ALL_DIMENSIONS_CLAMPED = 6;\n\n  BoxQP(int max_iter, Scalar min_grad_norm, Scalar min_rel_improvement, Scalar step_dec, Scalar min_step, Scalar armijo,\n        Logger* logger, bool verbose)\n    : max_iter_(max_iter)\n    , min_grad_(min_grad_norm)\n    , min_rel_improve_(min_rel_improvement)\n    , step_dec_(step_dec)\n    , min_step_(min_step)\n    , armijo_(armijo)\n    , logger_(logger)\n    , verbose_(verbose)\n  {\n  }\n\n  BoxQP(const BoxQPOptions<Scalar>& options, Logger* logger)\n    : BoxQP(options.max_iter, options.min_grad_norm, options.min_rel_improvement, options.step_dec, options.min_step,\n            options.armijo, logger, options.verbose)\n  {\n  }\n\n  BoxQP(Logger* logger) : BoxQP(BoxQPOptions<Scalar>(), logger)\n  {\n  }\n\n  Result operator()(const Eigen::Ref<const Hessian>& H, const Eigen::Ref<const Gradient>& g,\n                    const Eigen::Ref<const Vector>& lower_bound, const Eigen::Ref<const Vector>& upper_bound,\n                    const Eigen::Ref<const Vector>& x0)\n  {\n    Eigen::Matrix<bool, N, 1> clamped = Eigen::Matrix<bool, N, 1>::Constant(false);\n    Eigen::Matrix<bool, N, 1> free = Eigen::Matrix<bool, N, 1>::Constant(true);\n    Vector x = x0.cwiseMin(upper_bound).cwiseMax(lower_bound);\n    Scalar old_value = static_cast<Scalar>(0.0);\n    int result = 0;\n    Scalar gnorm = static_cast<Scalar>(0.0);\n    int nfactor = 0;\n\n    // Initial objective value\n    Scalar value = x.transpose() * g + static_cast<Scalar>(0.5) * (x.transpose() * H * x).value();\n\n    // Main loop\n    int iter;\n    for (iter = 0; iter < max_iter_; ++iter)\n    {\n      if (result != 0)\n      {\n        break;\n      }\n\n      // Check relative improvement\n      if (iter > 0 && (old_value - value) < 
min_rel_improve_ * std::abs(old_value))\n      {\n        result = IMPROVEMENT_TOO_SMALL;\n        if (verbose_)\n          logger_->warning(\"BoxQP: Relative improvement too small - code %d\", result);\n        break;\n      }\n      old_value = value;\n\n      // Get gradient\n      Gradient grad = g + H * x;\n\n      // Find clamped dimensions\n      Eigen::Matrix<bool, N, 1> old_clamped = clamped;\n      clamped = Eigen::Matrix<bool, N, 1>::Constant(false);\n      for (int i = 0; i < N; ++i)\n      {\n        clamped(i) = (x(i) <= lower_bound(i) && grad(i) > static_cast<Scalar>(0.0)) ||\n                     (x(i) >= upper_bound(i) && grad(i) < static_cast<Scalar>(0.0));\n        free(i) = !clamped(i);\n      }\n\n      // Check for all clamped\n      if (clamped.all())\n      {\n        result = ALL_DIMENSIONS_CLAMPED;\n        if (verbose_)\n          logger_->warning(\"BoxQP: All dimensions clamped - code %d\", result);\n        break;\n      }\n\n      // Factorize if clamped has changed\n      if (iter == 0 || (clamped.template cast<int>() - old_clamped.template cast<int>()).template cast<bool>().any())\n      {\n        Eigen::Matrix<Scalar, Eigen::Dynamic, N> Hfr(free.count(), N);\n        Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Hff(free.count(), free.count());\n        int j = 0, k = 0;\n        for (int i = 0; i < N; ++i)\n        {\n          if (free(i))\n          {\n            Hfr.row(j++) = H.row(i);\n          }\n        }\n        for (int i = 0; i < N; ++i)\n        {\n          if (free(i))\n          {\n            Hff.col(k++) = Hfr.col(i);\n          }\n        }\n\n        llt_.compute(Hff);\n        if (llt_.info() != Eigen::Success)\n        {\n          result = HESSIAN_NOT_POSITIVE_DEFINITE;\n          if (verbose_)\n            logger_->warning(\"BoxQP: Hessian not positive definite - code %d\", result);\n          break;\n        }\n\n        ++nfactor;\n      }\n\n      // Check gradient norm\n      gnorm = 
free.array().select(grad, Gradient::Zero()).norm();\n      if (gnorm < min_grad_)\n      {\n        result = GRADIENT_NORM_TOO_SMALL;\n        if (verbose_)\n          logger_->warning(\"BoxQP: Gradient norm too small - code %d\", result);\n        break;\n      }\n\n      // Get search direction\n      Gradient grad_clamped = g + H * (x.cwiseProduct(clamped.template cast<Scalar>()));\n      Vector search = Vector::Zero();\n      Eigen::Matrix<Scalar, Eigen::Dynamic, 1> grad_clamped_free(free.count());\n      Eigen::Matrix<Scalar, Eigen::Dynamic, 1> x_free(free.count());\n      int j = 0;\n      for (int i = 0; i < N; ++i)\n      {\n        if (free(i))\n        {\n          grad_clamped_free(j) = grad_clamped(i);\n          x_free(j) = x(i);\n          ++j;\n        }\n      }\n\n      auto search_free = -llt_.solve(grad_clamped_free) - x_free;\n      for (int i = N; i > 0; --i)\n      {\n        if (free(i - 1))\n        {\n          search(i - 1) = search_free(--j);\n        }\n      }\n\n      // Check for descent direction\n      Scalar sdotg = search.dot(grad);\n      if (sdotg >= static_cast<Scalar>(0.0))\n      {\n        result = NO_DESCENT_DIRECTION;\n        if (verbose_)\n          logger_->warning(\"BoxQP: No descent direction - code %d\", result);\n        break;\n      }\n\n      // Armijo linesearch\n      Scalar step = static_cast<Scalar>(1.0);\n      int nstep = 0;\n      Vector xc = (x + step * search).cwiseMin(upper_bound).cwiseMax(lower_bound);\n      Scalar vc = xc.transpose() * g + static_cast<Scalar>(0.5) * (xc.transpose() * H * xc).value();\n      while (((vc - old_value) / (step * sdotg)) < armijo_)\n      {\n        step *= step_dec_;\n        ++nstep;\n        xc = (x + step * search).cwiseMin(upper_bound).cwiseMax(lower_bound);\n        vc = xc.transpose() * g + static_cast<Scalar>(0.5) * (xc.transpose() * H * xc).value();\n        if (step < min_step_)\n        {\n          result = MAX_LINESEARCH_ITERATIONS_EXCEEDED;\n          if 
(verbose_)\n            logger_->warning(\"BoxQP: Max line search iterations exceeded - code %d\", result);\n          break;\n        }\n      }\n\n      if (verbose_)\n      {\n        logger_->info(\"iter %-3d\\tvalue %-9.5g\\t|g| %-9.3g\\treduction %-9.3g\\tlinesearch %g^%-2d\\tn_clamped %d\\n\", iter,\n                      vc, gnorm, old_value - vc, step_dec_, nstep, clamped.count());\n      }\n\n      x = xc;\n      value = vc;\n    }\n\n    if (iter >= max_iter_)\n    {\n      result = MAX_ITERATIONS_EXCEEDED;\n      if (verbose_)\n        logger_->info(\"BoxQP: All iterations completed - code %d\", result);\n    }\n\n    if (verbose_)\n    {\n      logger_->info(\"Result code %d\\titerations %d\\tgradient %-12.6g\\tfinal value %-12.6g\\tfactorizations %d\\n\", result,\n                    iter, gnorm, value, nfactor);\n    }\n\n    return { x, llt_, free, result };\n  }\n\nprivate:\n  int max_iter_;\n  Scalar min_grad_;\n  Scalar min_rel_improve_;\n  Scalar step_dec_;\n  Scalar min_step_;\n  Scalar armijo_;\n  Logger* logger_;\n  bool verbose_;\n  Eigen::LLT<Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>> llt_;\n};\n\n}  // namespace util\n\n#endif  // TRAJOPT_BOXQP_HPP\n"
  },
  {
    "path": "include/mppi/ddp/ddp.h",
    "content": "#ifndef TRAJOPT_DDP_HPP\n#define TRAJOPT_DDP_HPP\n\n#include \"boxqp.h\"\n#include \"result.h\"\n#include \"util.h\"\n#include <Eigen/Dense>\n#include <limits>\n#include <memory>\n\ntemplate <class DynamicsT>\nclass DDP\n{\n  using Scalar = typename DynamicsT::Scalar;\n  using State = typename DynamicsT::State;\n  using StateTrajectory = typename DynamicsT::StateTrajectory;\n  using Control = typename DynamicsT::Control;\n  using ControlTrajectory = typename DynamicsT::ControlTrajectory;\n\npublic:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  DDP(Scalar time_step, int time_steps, int iterations, util::Logger* logger, bool verbose,\n      util::BoxQPOptions<Scalar> qp_options = util::BoxQPOptions<Scalar>())\n    : dt_(time_step)\n    , H_(time_steps)\n    , iter_(iterations)\n    , logger_(logger)\n    , verbose_(verbose)\n    , Lk_(H_, Eigen::Matrix<Scalar, DynamicsT::ControlSize, DynamicsT::StateSize>::Zero())\n    , df_(H_, Eigen::Matrix<Scalar, DynamicsT::StateSize, DynamicsT::StateSize + DynamicsT::ControlSize>::Zero())\n    , d2L_(H_, Eigen::Matrix<Scalar, DynamicsT::StateSize + DynamicsT::ControlSize,\n                             DynamicsT::StateSize + DynamicsT::ControlSize>::Zero())\n    , Vxx_(H_, Eigen::Matrix<Scalar, DynamicsT::StateSize, DynamicsT::StateSize>::Zero())\n    , VxxT_(H_, Eigen::Matrix<Scalar, DynamicsT::StateSize, DynamicsT::StateSize>::Zero())\n    , boxqp_(qp_options, logger_)\n  {\n    x_.setZero(DynamicsT::StateSize, H_);\n    u_.setZero(DynamicsT::ControlSize, H_);\n    Is_.setIdentity();\n    dL_.setZero(DynamicsT::StateSize + DynamicsT::ControlSize, H_);\n    L_.setZero(H_);\n    Vx_.setZero(DynamicsT::StateSize, H_);\n    V_.setZero(H_);\n    qx_.setZero();\n    qu_.setZero();\n    qux_.setZero();\n    qxx_.setZero();\n    quu_.setZero();\n    lk_.setZero(DynamicsT::ControlSize, H_);\n    cost_.setZero(iter_, H_);\n  }\n\n  template <typename CostFunction, typename TerminalCostFunction>\n  OptimizerResult<DynamicsT>\n  
run(const Eigen::Ref<const State>& x0, const Eigen::Ref<const ControlTrajectory>& u, DynamicsT& dynamics,\n      CostFunction& run_cost, TerminalCostFunction& term_cost,\n      const Eigen::Ref<const Control>& u_min = -Control::Constant(std::numeric_limits<Scalar>::infinity()),\n      const Eigen::Ref<const Control>& u_max = Control::Constant(std::numeric_limits<Scalar>::infinity()))\n  {\n    x_.col(0) = x0;\n    u_ = u;\n    for (int i = 1; i < H_; ++i)\n    {\n      if (i < H_ - 1)\n      {\n        // u_.col(i-1) += Lk_.at(i)*(x_.col(i-1) - x_.col(i));\n        u_.col(i - 1) = u_.col(i - 1).cwiseMin(u_max).cwiseMax(u_min);\n      }\n      x_.col(i) = x_.col(i - 1) + dynamics.f(x_.col(i - 1), u_.col(i - 1)) * dt_;\n    }\n\n    int current_iteration = 0;\n    while (current_iteration < iter_)\n    {\n      // Obtain Jacobians -- this can be parallelized but is pretty bad with OpenMP\n      for (int k = 0; k < H_; ++k)\n      {\n        df_.at(k) = dynamics.df(x_.col(k), u_.col(k)) * dt_;\n        df_.at(k).leftCols(DynamicsT::StateSize) += Is_;\n        d2L_.at(k) = run_cost.d2c(x_.col(k), u_.col(k), k);\n        // std::cout << run_cost.c(x_.col(k), u_.col(k), k) << std::endl;\n        dL_.col(k) = run_cost.dc(x_.col(k), u_.col(k), k);\n        L_(k) = run_cost.c(x_.col(k), u_.col(k), k);\n      }\n\n      // Evaluate boundary condition\n      Vxx_.at(H_ - 1) = term_cost.d2c(x_.col(H_ - 1));\n      VxxT_.at(H_ - 1) = Vxx_.at(H_ - 1).transpose();\n      Vxx_.at(H_ - 1) = static_cast<Scalar>(0.5) * (Vxx_.at(H_ - 1) + VxxT_.at(H_ - 1));\n      Vx_.col(H_ - 1) = term_cost.dc(x_.col(H_ - 1));\n      V_(H_ - 1) = term_cost.c(x_.col(H_ - 1));\n\n      // Perform backward pass\n      for (int k = H_ - 2; k >= 0; --k)\n      {\n        const auto& Phi = df_.at(k).leftCols(DynamicsT::StateSize);\n        const auto& B = df_.at(k).rightCols(DynamicsT::ControlSize);\n\n        qx_ = dL_.col(k).head(DynamicsT::StateSize) * dt_ + Phi.transpose() * Vx_.col(k + 1);\n        
qu_ = dL_.col(k).tail(DynamicsT::ControlSize) * dt_ + B.transpose() * Vx_.col(k + 1);\n        qux_ = d2L_.at(k).bottomLeftCorner(DynamicsT::ControlSize, DynamicsT::StateSize) * dt_ +\n               B.transpose() * Vxx_.at(k + 1) * Phi;\n        qxx_ = d2L_.at(k).topLeftCorner(DynamicsT::StateSize, DynamicsT::StateSize) * dt_ +\n               Phi.transpose() * Vxx_.at(k + 1) * Phi;\n        quu_ = d2L_.at(k).bottomRightCorner(DynamicsT::ControlSize, DynamicsT::ControlSize) * dt_ +\n               B.transpose() * Vxx_.at(k + 1) * B;\n\n        Eigen::LDLT<Eigen::Matrix<Scalar, DynamicsT::ControlSize, DynamicsT::ControlSize> > ldlt(quu_);\n        if (ldlt.info() == Eigen::Success)\n        {\n          Lk_.at(k) = ldlt.solve(-qux_);\n          lk_.col(k) = ldlt.solve(-qu_);\n        }\n        else\n        {\n          logger_->error(\"Failed -- DEAD\\n\");\n          std::exit(-3);\n        }\n\n        // Backprop value function\n        Vxx_.at(k) = qxx_ + qux_.transpose() * Lk_.at(k);\n        VxxT_.at(k) = Vxx_.at(k).transpose();\n        Vxx_.at(k) = static_cast<Scalar>(0.5) * (Vxx_.at(k) + VxxT_.at(k));\n        Vx_.col(k) = qx_ + qux_.transpose() * lk_.col(k);\n        V_(k) = static_cast<Scalar>(0.5) * (qu_.transpose() * lk_.col(k)).value();\n      }\n\n      bool accept = false;\n      Scalar alpha = static_cast<Scalar>(1.0);\n      StateTrajectory xnew = StateTrajectory::Zero(DynamicsT::StateSize, H_);\n      ControlTrajectory unew = ControlTrajectory::Zero(DynamicsT::ControlSize, H_);\n      while (!accept)\n      {\n        xnew.col(0) = x_.col(0);\n\n        for (int k = 0; k < H_ - 1; ++k)\n        {\n          State dx = xnew.col(k) - x_.col(k);\n          unew.col(k) = u_.col(k) + alpha * lk_.col(k) + Lk_.at(k) * dx;\n          unew.col(k) = unew.col(k).cwiseMin(u_max).cwiseMax(u_min);\n          xnew.col(k + 1) = xnew.col(k) + dynamics.f(xnew.col(k), unew.col(k)) * dt_;\n          cost_(current_iteration, k) = run_cost.c(xnew.col(k), 
unew.col(k), k) * dt_;\n        }\n        cost_(current_iteration, H_ - 1) = V_(H_ - 1);\n\n        if (current_iteration == 0 || alpha < static_cast<Scalar>(0.0001) ||\n            cost_.row(current_iteration).sum() <= cost_.row(current_iteration - 1).sum())\n        {\n          u_ = unew;\n          x_ = xnew;\n          accept = true;\n        }\n        else\n        {\n          alpha *= static_cast<Scalar>(0.5);\n          cost_.row(current_iteration).setZero();\n          unew.setZero();\n          xnew.setZero();\n        }\n      }\n      ++current_iteration;\n    }\n\n    return OptimizerResult<DynamicsT>(current_iteration, H_, cost_.row(current_iteration - 1).sum(), cost_, x_, u_, Lk_,\n                                      lk_);\n  }\n\nprivate:\n  Scalar dt_;\n  int H_;\n  int iter_;\n  util::Logger* logger_;\n  bool verbose_;\n\n  util::EigenAlignedVector<Scalar, DynamicsT::ControlSize, DynamicsT::StateSize> Lk_;\n  util::EigenAlignedVector<Scalar, DynamicsT::StateSize, DynamicsT::StateSize + DynamicsT::ControlSize> df_;\n  util::EigenAlignedVector<Scalar, DynamicsT::StateSize + DynamicsT::ControlSize,\n                           DynamicsT::StateSize + DynamicsT::ControlSize>\n      d2L_;\n  util::EigenAlignedVector<Scalar, DynamicsT::StateSize, DynamicsT::StateSize> Vxx_;\n  util::EigenAlignedVector<Scalar, DynamicsT::StateSize, DynamicsT::StateSize> VxxT_;\n  util::BoxQP<Scalar, DynamicsT::ControlSize> boxqp_;\n  StateTrajectory x_;\n  ControlTrajectory u_;\n  Eigen::Matrix<Scalar, DynamicsT::StateSize, DynamicsT::StateSize> Is_;\n  Eigen::Matrix<Scalar, DynamicsT::StateSize + DynamicsT::ControlSize, Eigen::Dynamic> dL_;\n  Eigen::Matrix<Scalar, 1, Eigen::Dynamic> L_;\n  Eigen::Matrix<Scalar, DynamicsT::StateSize, Eigen::Dynamic> Vx_;\n  Eigen::Matrix<Scalar, 1, Eigen::Dynamic> V_;\n  State qx_;\n  Control qu_;\n  Eigen::Matrix<Scalar, DynamicsT::ControlSize, DynamicsT::StateSize> qux_;\n  Eigen::Matrix<Scalar, DynamicsT::StateSize, 
DynamicsT::StateSize> qxx_;\n  Eigen::Matrix<Scalar, DynamicsT::ControlSize, DynamicsT::ControlSize> quu_;\n  Eigen::Matrix<Scalar, DynamicsT::ControlSize, Eigen::Dynamic> lk_;\n  Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> cost_;\n};\n\n#endif  // TRAJOPT_DDP_HPP\n"
  },
  {
    "path": "include/mppi/ddp/ddp_costs.h",
    "content": "#ifndef TRAJOPT_COSTS_HPP\n#define TRAJOPT_COSTS_HPP\n\n#include <Eigen/Dense>\n\ntemplate <class Dynamics>\nstruct CostFunction\n{\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  using Scalar = typename Dynamics::Scalar;\n  using State = typename Dynamics::State;\n  using Control = typename Dynamics::Control;\n  using Gradient = Eigen::Matrix<Scalar, Dynamics::StateSize + Dynamics::ControlSize, 1>;\n  using Hessian =\n      Eigen::Matrix<Scalar, Dynamics::StateSize + Dynamics::ControlSize, Dynamics::StateSize + Dynamics::ControlSize>;\n\n  CostFunction() = default;\n  CostFunction(const CostFunction& other) = default;\n  CostFunction(CostFunction&& other) = default;\n  CostFunction& operator=(const CostFunction& other) = default;\n  CostFunction& operator=(CostFunction&& other) = default;\n  virtual ~CostFunction() = default;\n\n  CostFunction(const Eigen::Ref<const State>& target) : xf(target)\n  {\n  }\n\n  const Eigen::Ref<const State>& target() const\n  {\n    return xf;\n  }\n  Eigen::Ref<State> target()\n  {\n    return xf;\n  }\n  const Scalar& target(int idx) const\n  {\n    return xf(idx);\n  }\n  Scalar& target(int idx)\n  {\n    return xf(idx);\n  }\n\n  virtual Scalar c(const Eigen::Ref<const State>& x, const Eigen::Ref<const Control>& u, int t) = 0;\n  virtual Gradient dc(const Eigen::Ref<const State>& x, const Eigen::Ref<const Control>& u, int t) = 0;\n  virtual Hessian d2c(const Eigen::Ref<const State>& x, const Eigen::Ref<const Control>& u, int t) = 0;\n\n  State xf;\n};\n\ntemplate <class Dynamics>\nstruct TerminalCostFunction\n{\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  using Scalar = typename Dynamics::Scalar;\n  using State = typename Dynamics::State;\n  using Gradient = Eigen::Matrix<Scalar, Dynamics::StateSize, 1>;\n  using Hessian = Eigen::Matrix<Scalar, Dynamics::StateSize, Dynamics::StateSize>;\n\n  TerminalCostFunction() = default;\n  TerminalCostFunction(const TerminalCostFunction& other) = default;\n  
TerminalCostFunction(TerminalCostFunction&& other) = default;\n  TerminalCostFunction& operator=(const TerminalCostFunction& other) = default;\n  TerminalCostFunction& operator=(TerminalCostFunction&& other) = default;\n  virtual ~TerminalCostFunction() = default;\n\n  TerminalCostFunction(const Eigen::Ref<const State>& target) : xf(target)\n  {\n  }\n\n  const Eigen::Ref<const State>& target() const\n  {\n    return xf;\n  }\n\n  Eigen::Ref<State> target()\n  {\n    return xf;\n  }\n\n  const Scalar& target(int idx) const\n  {\n    return xf(idx);\n  }\n\n  Scalar& target(int idx)\n  {\n    return xf(idx);\n  }\n\n  virtual Scalar c(const Eigen::Ref<const State>& x) = 0;\n  virtual Gradient dc(const Eigen::Ref<const State>& x) = 0;\n  virtual Hessian d2c(const Eigen::Ref<const State>& x) = 0;\n\n  State xf;\n};\n\n#endif  // TRAJOPT_COSTS_HPP\n"
  },
  {
    "path": "include/mppi/ddp/ddp_dynamics.h",
    "content": "#ifndef TRAJOPT_DYNAMICS_HPP\n#define TRAJOPT_DYNAMICS_HPP\n\n#include \"util.h\"\n#include <Eigen/Dense>\n#include <unsupported/Eigen/NumericalDiff>\n#include <functional>\n\nnamespace internal\n{\ntemplate <class T, int S, int C>\nstruct Differentiable\n{\n  /*****************************************************************************/\n  /*** Replicate Eigen's generic functor implementation to avoid inheritance ***/\n  /*** We only use the fixed-size functionality ********************************/\n  /*****************************************************************************/\n  enum\n  {\n    InputsAtCompileTime = S + C,\n    ValuesAtCompileTime = S\n  };\n  using Scalar = T;\n  using InputType = Eigen::Matrix<T, InputsAtCompileTime, 1>;\n  using ValueType = Eigen::Matrix<T, ValuesAtCompileTime, 1>;\n  using JacobianType = Eigen::Matrix<T, ValuesAtCompileTime, InputsAtCompileTime>;\n  int inputs() const\n  {\n    return InputsAtCompileTime;\n  }\n  int values() const\n  {\n    return ValuesAtCompileTime;\n  }\n  int operator()(const Eigen::Ref<const InputType>& xu, Eigen::Ref<ValueType> dx) const\n  {\n    dx = f_(xu.template head<S>(), xu.template tail<C>());\n    return 0;\n  }\n  /*****************************************************************************/\n\n  using DiffFunc = std::function<Eigen::Matrix<T, S, 1>(const Eigen::Ref<const Eigen::Matrix<T, S, 1>>&,\n                                                        const Eigen::Ref<const Eigen::Matrix<T, C, 1>>&)>;\n  Differentiable(const DiffFunc& f) : f_(f)\n  {\n  }\n\nprivate:\n  DiffFunc f_;\n};\n\n}  // namespace internal\n\nnamespace DDP_structures\n{\ntemplate <class T, int S, int C>\nstruct Dynamics\n{\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  enum\n  {\n    StateSize = S,\n    ControlSize = C\n  };\n  using Scalar = T;\n  using State = Eigen::Matrix<T, StateSize, 1>;\n  using Control = Eigen::Matrix<T, ControlSize, 1>;\n  using StateTrajectory = Eigen::Matrix<T, StateSize, 
Eigen::Dynamic>;\n  using ControlTrajectory = Eigen::Matrix<T, ControlSize, Eigen::Dynamic>;\n  using Jacobian = typename internal::Differentiable<T, StateSize, ControlSize>::JacobianType;\n  using StateControl = typename internal::Differentiable<T, StateSize, ControlSize>::InputType;\n  using FeedbackGain = Eigen::Matrix<T, ControlSize, StateSize>;\n  using FeedforwardGain = Eigen::Matrix<T, ControlSize, 1>;\n  using FeedbackGainTrajectory = util::EigenAlignedVector<T, C, S>;\n  using FeedforwardGainTrajectory = Eigen::Matrix<T, ControlSize, Eigen::Dynamic>;\n\n  Dynamics()\n    : diff_([this](const Eigen::Ref<const State>& x, const Eigen::Ref<const Control>& u) -> State {\n      return this->f(x, u);\n    })\n    , num_diff_(diff_)\n  {\n  }\n\n  Dynamics(const Dynamics& other) = default;\n\n  Dynamics(Dynamics&& other) = default;\n\n  Dynamics& operator=(const Dynamics& other) = default;\n\n  Dynamics& operator=(Dynamics&& other) = default;\n\n  virtual ~Dynamics() = default;\n\n  // Implementation required\n  virtual State f(const Eigen::Ref<const State>& x, const Eigen::Ref<const Control>& u) = 0;\n\n  // Implementation optional\n  virtual Jacobian df(const Eigen::Ref<const State>& x, const Eigen::Ref<const Control>& u)\n  {\n    num_diff_.df((typename internal::Differentiable<T, StateSize, ControlSize>::InputType() << x, u).finished(), j_);\n    return j_;\n  }\n\nprivate:\n  Jacobian j_;\n  typename internal::Differentiable<T, StateSize, ControlSize> diff_;\n  Eigen::NumericalDiff<typename internal::Differentiable<T, StateSize, ControlSize>, Eigen::Central> num_diff_;\n};\n}  // namespace DDP_structures\n\n#endif  // TRAJOPT_DYNAMICS_HPP\n"
  },
  {
    "path": "include/mppi/ddp/ddp_model_wrapper.h",
    "content": "#ifndef DDP_MODEL_WRAPPER_H\n#define DDP_MODEL_WRAPPER_H\n\n#include \"ddp_dynamics.h\"\n#include <type_traits>\n\ntemplate <typename T>\nstruct HasAnalyticGrad\n{\n  template <typename U>\n  static char Test(decltype(&U::computeGrad));\n  template <typename U>\n  static long Test(...);\n  static const bool Has = sizeof(Test<T>(0)) == sizeof(char);\n};\n\ntemplate <typename T>\nbool getGrad(T* model, typename DDP_structures::Dynamics<float, T::STATE_DIM, T::CONTROL_DIM>::Jacobian& jac,\n             typename DDP_structures::Dynamics<float, T::STATE_DIM, T::CONTROL_DIM>::State& x,\n             typename DDP_structures::Dynamics<float, T::STATE_DIM, T::CONTROL_DIM>::Control& u, std::true_type)\n{\n  // T::dfdx A;\n  // T::dfdu B;\n  Eigen::Matrix<float, T::STATE_DIM, T::STATE_DIM> A = Eigen::Matrix<float, T::STATE_DIM, T::STATE_DIM>::Zero();\n  Eigen::Matrix<float, T::STATE_DIM, T::CONTROL_DIM> B = Eigen::Matrix<float, T::STATE_DIM, T::CONTROL_DIM>::Zero();\n  bool exists = model->computeGrad(x, u, A, B);\n  jac.block(0, 0, T::STATE_DIM, T::STATE_DIM) = A;\n  jac.block(0, T::STATE_DIM, T::STATE_DIM, T::CONTROL_DIM) = B;\n  // for (int i = 0; i < T::STATE_DIM; i++){\n  //     for (int j = 0; j < T::STATE_DIM; j++){\n  //         jac(i,j) = A(i,j);\n  //     }\n  // }\n  return exists;\n}\n\ntemplate <typename T>\nbool getGrad(T* model, typename DDP_structures::Dynamics<float, T::STATE_DIM, T::CONTROL_DIM>::Jacobian& jac,\n             typename DDP_structures::Dynamics<float, T::STATE_DIM, T::CONTROL_DIM>::State& x,\n             typename DDP_structures::Dynamics<float, T::STATE_DIM, T::CONTROL_DIM>::Control& u, std::false_type)\n{\n  return false;\n}\n\ntemplate <class DYNAMICS_T>\nstruct ModelWrapperDDP : public DDP_structures::Dynamics<float, DYNAMICS_T::STATE_DIM, DYNAMICS_T::CONTROL_DIM>\n{\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  using Scalar = float;\n  using State = typename DDP_structures::Dynamics<Scalar, DYNAMICS_T::STATE_DIM, 
DYNAMICS_T::CONTROL_DIM>::State;\n  using Control = typename DDP_structures::Dynamics<Scalar, DYNAMICS_T::STATE_DIM, DYNAMICS_T::CONTROL_DIM>::Control;\n  using Jacobian = typename DDP_structures::Dynamics<Scalar, DYNAMICS_T::STATE_DIM, DYNAMICS_T::CONTROL_DIM>::Jacobian;\n  using StateTrajectory =\n      typename DDP_structures::Dynamics<Scalar, DYNAMICS_T::STATE_DIM, DYNAMICS_T::CONTROL_DIM>::StateTrajectory;\n  using ControlTrajectory =\n      typename DDP_structures::Dynamics<Scalar, DYNAMICS_T::STATE_DIM, DYNAMICS_T::CONTROL_DIM>::ControlTrajectory;\n\n  State state;\n  Control control;\n\n  DYNAMICS_T* model_;\n\n  ModelWrapperDDP(DYNAMICS_T* model)\n  {\n    model_ = model;\n  }\n\n  State f(const Eigen::Ref<const State>& x, const Eigen::Ref<const Control>& u)\n  {\n    // This section is specific to the neural network implementation for the autorally\n    state = x;\n    control = u;\n\n    // Compute the state derivative xDot\n    State dx;\n    State next_state;\n    typename DYNAMICS_T::output_array output;\n    // TODO\n    model_->step(state, next_state, dx, control, output, 0, 0.01);\n    return dx;\n  }\n\n  Jacobian df(const Eigen::Ref<const State>& x, const Eigen::Ref<const Control>& u)\n  {\n    Jacobian j_ = Jacobian::Zero(DYNAMICS_T::STATE_DIM, DYNAMICS_T::STATE_DIM + DYNAMICS_T::CONTROL_DIM);\n    state = x;\n    control = u;\n    bool analyticGradComputed =\n        getGrad(model_, j_, state, control, std::integral_constant<bool, HasAnalyticGrad<DYNAMICS_T>::Has>());\n    if (!analyticGradComputed)\n    {\n      j_ = DDP_structures::Dynamics<float, DYNAMICS_T::STATE_DIM, DYNAMICS_T::CONTROL_DIM>::df(x, u);\n    }\n    return j_;\n  }\n};\n\n#endif  // DDP_MODEL_WRAPPER_H\n"
  },
  {
    "path": "include/mppi/ddp/ddp_tracking_costs.h",
    "content": "#ifndef DDP_TRACKING_COSTS_\n#define DDP_TRACKING_COSTS_\n\n#include \"ddp_dynamics.h\"\n#include \"ddp_costs.h\"\n\ntemplate <typename DynamicsT>\nstruct TrackingCostDDP : public CostFunction<DynamicsT>\n{\n  using Dynamics = DynamicsT;\n  using Scalar = typename Dynamics::Scalar;\n  using State = typename Dynamics::State;\n  using Control = typename Dynamics::Control;\n  using Gradient = typename CostFunction<Dynamics>::Gradient;\n  using Hessian = typename CostFunction<Dynamics>::Hessian;\n\n  static const int N = Dynamics::StateSize;\n  static const int M = Dynamics::ControlSize;\n  using StateCostWeight = Eigen::Matrix<Scalar, N, N>;\n  using ControlCostWeight = Eigen::Matrix<Scalar, M, M>;\n\npublic:\n  Eigen::MatrixXf traj_target_x_;\n  Eigen::MatrixXf traj_target_u_;\n\n  TrackingCostDDP(const Eigen::Ref<const StateCostWeight>& Q, const Eigen::Ref<const ControlCostWeight>& R,\n                  int num_timesteps)\n    : Q_(Q), R_(R)\n  {\n    QR_.setZero();\n    QR_.template topLeftCorner<N, N>() = Q;\n    QR_.template bottomRightCorner<M, M>() = R;\n    traj_target_x_ = Eigen::MatrixXf::Zero(Dynamics::StateSize, num_timesteps);\n    traj_target_u_ = Eigen::MatrixXf::Zero(Dynamics::ControlSize, num_timesteps);\n  }\n\n  Scalar c(const Eigen::Ref<const State>& x, const Eigen::Ref<const Control>& u, int t)\n  {\n    // TODO use method inside of dynamics to compute this\n    float state_cost = ((x - traj_target_x_.col(t)).transpose() * Q_ * (x - traj_target_x_.col(t))).value();\n    float control_cost = ((u - traj_target_u_.col(t)).transpose() * R_ * (u - traj_target_u_.col(t))).value();\n    return state_cost + control_cost;\n  }\n\n  Gradient dc(const Eigen::Ref<const State>& x, const Eigen::Ref<const Control>& u, int t)\n  {\n    return (Gradient() << Q_ * (x - traj_target_x_.col(t)), R_ * (u - traj_target_u_.col(t))).finished();\n  }\n\n  Hessian d2c(const Eigen::Ref<const State>& x, const Eigen::Ref<const Control>& u, int t)\n  {\n    
return QR_;\n  }\n\n  void setTargets(const float* traj_target, const float* control_target, int timesteps)\n  {\n    for (int t = 0; t < timesteps; t++)\n    {\n      for (int i = 0; i < Dynamics::StateSize; i++)\n      {\n        traj_target_x_(i, t) = traj_target[Dynamics::StateSize * t + i];\n      }\n    }\n\n    for (int t = 0; t < timesteps; t++)\n    {\n      for (int i = 0; i < Dynamics::ControlSize; i++)\n      {\n        traj_target_u_(i, t) = control_target[Dynamics::ControlSize * t + i];\n      }\n    }\n  }\n\n  // Still specific for autorally\n  //    void setStop(Eigen::MatrixXf state, int timesteps)\n  //    {\n  //        for (int t = 0; t < timesteps; t++){\n  //            traj_target_x_.col(t) << state;\n  //        }\n  //        traj_target_u_ = Eigen::MatrixXf::Zero(Dynamics::ControlSize, timesteps);\n  //        Q_.diagonal() << 10.0, 10.0, 25.0, 10.0, 10.0, 10.0, 10.0;\n  //    }\n\nprivate:\n  StateCostWeight Q_;\n  ControlCostWeight R_;\n  Hessian QR_;\n};\n\ntemplate <typename DynamicsT>\nstruct TrackingTerminalCost : public TerminalCostFunction<DynamicsT>\n{\n  using Dynamics = DynamicsT;\n  using Scalar = typename Dynamics::Scalar;\n  using State = typename Dynamics::State;\n  using Gradient = typename TerminalCostFunction<Dynamics>::Gradient;\n  using Hessian = typename TerminalCostFunction<Dynamics>::Hessian;\n\n  static const int N = Dynamics::StateSize;\n  using StateCostWeight = Eigen::Matrix<Scalar, N, N>;\n\npublic:\n  TrackingTerminalCost(const Eigen::Ref<const StateCostWeight>& Qf, const Eigen::Ref<const State>& xf = State::Zero())\n    : Qf_(Qf)\n  {\n    this->target() = xf;  // Initialize the target to zero\n  }\n\n  Scalar c(const Eigen::Ref<const State>& x)\n  {\n    return ((x - this->target()).transpose() * Qf_ * (x - this->target())).value();\n  }\n\n  Gradient dc(const Eigen::Ref<const State>& x)\n  {\n    return Qf_ * (x - this->target());\n  }\n\n  Hessian d2c(const Eigen::Ref<const State>& x)\n  {\n    return 
Qf_;\n  }\n\nprivate:\n  StateCostWeight Qf_;\n};\n\n#endif /*DDP_TRACKING_COSTS_H_*/\n"
  },
  {
    "path": "include/mppi/ddp/result.h",
    "content": "#ifndef TRAJOPT_RESULT_HPP\n#define TRAJOPT_RESULT_HPP\n\n#include <Eigen/Dense>\n\n/**\n * @tparam  Dynamics\n * @brief   Represent the return type of an optimization of subject to Dynamics.\n */\ntemplate <class Dynamics>\nstruct OptimizerResult\n{\n  using Scalar = typename Dynamics::Scalar;\n\n  OptimizerResult() = default;\n  OptimizerResult(const OptimizerResult& other) = default;\n  OptimizerResult(OptimizerResult&& other) = default;\n  ~OptimizerResult() = default;\n  OptimizerResult& operator=(const OptimizerResult& other) = default;\n  OptimizerResult& operator=(OptimizerResult&& other) = default;\n\n  /**\n   * @brief       Constructor to use if the optimizer does not produce feedback or feedforward gains.\n   * @param iter  Number of iterations performed (I)\n   * @param ts    Number of time steps (H)\n   * @param tc    Total cost of the final trajectory returned by the optimizer\n   * @param c     Cost for each time step in each iteration (I x H)\n   * @param x     Optimized state trajectory\n   * @param u     Optimized control trajectory\n   */\n  OptimizerResult(int iter, int ts, Scalar tc,\n                  const Eigen::Ref<const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>>& c,\n                  const Eigen::Ref<const typename Dynamics::StateTrajectory>& x,\n                  const Eigen::Ref<const typename Dynamics::ControlTrajectory>& u)\n    : iterations(iter), timesteps(ts), total_cost(tc), cost(c), state_trajectory(x), control_trajectory(u)\n  {\n  }\n\n  /**\n   * @brief       Constructor to use if the optimizer produces feedback or feedforward gains.\n   * @param iter  Number of iterations performed (I)\n   * @param ts    Number of time steps (H)\n   * @param tc    Total cost of the final trajectory returned by the optimizer\n   * @param c     Cost for each time step in each iteration (I x H)\n   * @param x     Optimized state trajectory\n   * @param u     Optimized control trajectory\n   * @param fb    Feedback 
gains for every time step in the optimized trajectory\n   * @param ff    Feedforward gains for every time step in the optimized trajectory\n   */\n  OptimizerResult(int iter, int ts, Scalar tc,\n                  const Eigen::Ref<const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>>& c,\n                  const Eigen::Ref<const typename Dynamics::StateTrajectory>& x,\n                  const Eigen::Ref<const typename Dynamics::ControlTrajectory>& u,\n                  const typename Dynamics::FeedbackGainTrajectory& fb,\n                  const Eigen::Ref<const typename Dynamics::FeedforwardGainTrajectory>& ff)\n    : iterations(iter)\n    , timesteps(ts)\n    , total_cost(tc)\n    , cost(c)\n    , state_trajectory(x)\n    , control_trajectory(u)\n    , feedback_gain(fb)\n    , feedforward_gain(ff)\n  {\n  }\n\n  int iterations;     ///< # of optimizing iterations (I)\n  int timesteps;      ///< # of timesteps (H)\n  Scalar total_cost;  ///< Total cost over the trajectory returned by the optimizer after I iterations\n  Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> cost;  ///< Cost for each time step cost per iteration (I x H)\n  typename Dynamics::StateTrajectory state_trajectory;         ///< State trajectory created by optimizer\n  typename Dynamics::ControlTrajectory control_trajectory;     ///< Control trajectory created by optimizer\n  typename Dynamics::FeedbackGainTrajectory feedback_gain;     ///< Feedback gain at each time step\n  typename Dynamics::FeedforwardGainTrajectory feedforward_gain;  ///< Feedforward gain at each time step\n};\n\n#endif  // TRAJOPT_RESULT_HPP\n"
  },
  {
    "path": "include/mppi/ddp/util.h",
    "content": "#ifndef TRAJOPT_UTIL_HPP\n#define TRAJOPT_UTIL_HPP\n\n#include <Eigen/StdVector>\n#include <cmath>\n#include <cstdarg>\n#include <cstdio>\n#include <vector>\n\n/**\n * @namespace util  Miscellaneous utilities, convenience items, and type definitions.\n */\nnamespace util\n{\n/**\n * @tparam T    Scalar type of the Eigen::Matrix being stored\n * @tparam R    Number of Matrix rows\n * @tparam C    Number of Matrix columns\n * @brief       Convenience typedef for Eigen-aligned std::vector storing fixed-size Eigen matrices.\n */\ntemplate <typename T, int R, int C>\nusing EigenAlignedVector = std::vector<Eigen::Matrix<T, R, C>, Eigen::aligned_allocator<Eigen::Matrix<T, R, C>>>;\n\n/**\n * @tparam MatrixType   Typedef'd Eigen type\n * @brief               Convenience typedef for Eigen-aligned std::vector storing fixed-size Eigen matrices.\n */\ntemplate <typename MatrixType>\nusing NamedEigenAlignedVector = std::vector<MatrixType, Eigen::aligned_allocator<MatrixType>>;\n\ntemplate <typename Dynamics>\n/**\n * @brief   Convenience function to combine State and Control into a single vector.  
Note that a copy may be made if\n *          RVO is not applied.\n * @param x State vector\n * @param u Control vector\n * @return  Combined State and Control vector of size StateSize + ControlSize\n */\ninline typename Dynamics::StateControl combine_xu(const Eigen::Ref<const typename Dynamics::State>& x,\n                                                  const Eigen::Ref<const typename Dynamics::Control>& u)\n{\n  using StateControl = typename Dynamics::StateControl;\n  return (StateControl() << x, u).finished();\n}\n\n/**\n * @brief       Return the number of time steps that fit in the time horizon.\n * @param tf    Time horizon\n * @param dt    Step size\n * @return      Ceiling of the number of dt's in tf\n */\ninline int time_steps(double tf, double dt)\n{\n  return static_cast<int>(std::ceil(tf / dt));\n}\n\n/**\n * @brief   Abstract class for logging informational, error, and warning messages.\n */\nclass Logger\n{\npublic:\n  Logger() = default;\n  Logger(const Logger& other) = default;\n  Logger(Logger&& other) = default;\n  virtual ~Logger() = default;\n  Logger& operator=(const Logger& other) = default;\n  Logger& operator=(Logger&& other) = default;\n\n  /**\n   * @brief       Pure virtual function to log informational messages.\n   * @param fmt   Format string (if additional arguments are passed) or message to display\n   */\n  virtual void info(const char* fmt, ...) = 0;\n  /**\n   * @brief       Pure virtual function to log warnings.\n   * @param fmt   Format string (if additional arguments are passed) or message to display\n   */\n  virtual void warning(const char* fmt, ...) = 0;\n  /**\n   * @brief       Pure virtual function to log errors.\n   * @param fmt   Format string (if additional arguments are passed) or message to display\n   */\n  virtual void error(const char* fmt, ...) 
= 0;\n};\n\n/**\n * @brief   A logger that outputs to stdout for info messages and stderr for warnings and errors.\n */\nclass DefaultLogger : public Logger\n{\npublic:\n  /**\n   * @brief       Log info messages to stdout.\n   * @param fmt   Format string (if additional arguments are passed) or message to display\n   */\n  virtual void info(const char* fmt, ...)\n  {\n    std::va_list argptr;\n    va_start(argptr, fmt);\n    std::vprintf(fmt, argptr);\n    va_end(argptr);\n  }\n\n  /**\n   * @brief       Log warnings to stderr.\n   * @param fmt   Format string (if additional arguments are passed) or message to display\n   */\n  virtual void warning(const char* fmt, ...)\n  {\n    std::va_list argptr;\n    va_start(argptr, fmt);\n    std::vfprintf(stderr, fmt, argptr);\n    va_end(argptr);\n  }\n\n  /**\n   * @brief       Log errors to stderr.\n   * @param fmt   Format string (if additional arguments are passed) or message to display\n   */\n  virtual void error(const char* fmt, ...)\n  {\n    std::va_list argptr;\n    va_start(argptr, fmt);\n    std::vfprintf(stderr, fmt, argptr);\n    va_end(argptr);\n  }\n};\n\n}  // namespace util\n\n#endif  // TRAJOPT_UTIL_HPP\n"
  },
  {
    "path": "include/mppi/dynamics/autorally/ar_nn_model.cu",
    "content": "\n#include \"ar_nn_model.cuh\"\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nNeuralNetModel<S_DIM, C_DIM, K_DIM>::NeuralNetModel(std::array<float2, C_DIM> control_rngs, cudaStream_t stream)\n  : PARENT_CLASS(control_rngs, stream)\n{\n  std::vector<int> args{ 6, 32, 32, 4 };\n  helper_ = new FNNHelper<>(args, stream);\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nNeuralNetModel<S_DIM, C_DIM, K_DIM>::NeuralNetModel(cudaStream_t stream) : PARENT_CLASS(stream)\n{\n  std::vector<int> args{ 6, 32, 32, 4 };\n  helper_ = new FNNHelper<>(args, stream);\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nNeuralNetModel<S_DIM, C_DIM, K_DIM>::~NeuralNetModel()\n{\n  freeCudaMem();\n  free(helper_);\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nvoid NeuralNetModel<S_DIM, C_DIM, K_DIM>::freeCudaMem()\n{\n  if (this->GPUMemStatus_)\n  {\n    helper_->freeCudaMem();\n  }\n  PARENT_CLASS::freeCudaMem();\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nvoid NeuralNetModel<S_DIM, C_DIM, K_DIM>::updateModel(const std::vector<int>& description,\n                                                      const std::vector<float>& data)\n{\n  helper_->updateModel(description, data);\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nvoid NeuralNetModel<S_DIM, C_DIM, K_DIM>::paramsToDevice()\n{\n  if (this->GPUMemStatus_)\n  {\n    helper_->paramsToDevice();\n  }\n  PARENT_CLASS::paramsToDevice();\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nvoid NeuralNetModel<S_DIM, C_DIM, K_DIM>::loadParams(const std::string& model_path)\n{\n  helper_->loadParams(model_path);\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nbool NeuralNetModel<S_DIM, C_DIM, K_DIM>::computeGrad(const Eigen::Ref<const state_array>& state,\n                                                      const Eigen::Ref<const control_array>& control,\n                                                      Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B)\n{\n  Eigen::Matrix<float, S_DIM, S_DIM + C_DIM> jac;\n  
jac.setZero();\n\n  // Start with the kinematic and physics model derivatives\n  jac.row(0) << 0, 0, -sin(state(2)) * state(4) - cos(state(2)) * state(5), 0, cos(state(2)), -sin(state(2)), 0, 0, 0;\n  jac.row(1) << 0, 0, cos(state(2)) * state(4) - sin(state(2)) * state(5), 0, sin(state(2)), cos(state(2)), 0, 0, 0;\n  jac.row(2) << 0, 0, 0, 0, 0, 0, -1, 0, 0;\n\n  state_array state_der;\n\n  // First do the forward pass\n  computeDynamics(state, control, state_der);\n\n  Eigen::MatrixXf ip_delta = helper_->getZeroJacobianMatrix();\n  helper_->computeGrad(ip_delta);\n\n  jac.bottomRightCorner(DYNAMICS_DIM, DYNAMICS_DIM + C_DIM) += ip_delta;\n  A = jac.leftCols(S_DIM);\n  B = jac.rightCols(C_DIM);\n  return true;\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nvoid NeuralNetModel<S_DIM, C_DIM, K_DIM>::computeKinematics(const Eigen::Ref<const state_array>& state,\n                                                            Eigen::Ref<state_array> state_der)\n{\n  state_der(0) = cosf(state(2)) * state(4) - sinf(state(2)) * state(5);\n  state_der(1) = sinf(state(2)) * state(4) + cosf(state(2)) * state(5);\n  state_der(2) = -state(6);  // Pose estimate actually gives the negative yaw derivative\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nvoid NeuralNetModel<S_DIM, C_DIM, K_DIM>::computeDynamics(const Eigen::Ref<const state_array>& state,\n                                                          const Eigen::Ref<const control_array>& control,\n                                                          Eigen::Ref<state_array> state_der)\n{\n  Eigen::VectorXf input = helper_->getZeroInputVector();\n  Eigen::VectorXf output = helper_->getZeroOutputVector();\n  int i;\n  for (i = 0; i < DYNAMICS_DIM; i++)\n  {\n    input(i) = state(i + (S_DIM - DYNAMICS_DIM));\n  }\n  for (i = 0; i < C_DIM; i++)\n  {\n    input(DYNAMICS_DIM + i) = control(i);\n  }\n  helper_->forward(input, output);\n  for (i = 0; i < DYNAMICS_DIM; i++)\n  {\n    state_der(i + (S_DIM - DYNAMICS_DIM)) = 
output[i];\n  }\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\n__device__ void NeuralNetModel<S_DIM, C_DIM, K_DIM>::computeKinematics(float* state, float* state_der)\n{\n  state_der[0] = cosf(state[2]) * state[4] - sinf(state[2]) * state[5];\n  state_der[1] = sinf(state[2]) * state[4] + cosf(state[2]) * state[5];\n  state_der[2] = -state[6];  // Pose estimate actually gives the negative yaw derivative\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\n__device__ void NeuralNetModel<S_DIM, C_DIM, K_DIM>::computeDynamics(float* state, float* control, float* state_der,\n                                                                     float* theta_s)\n{\n  float* curr_act;\n  int tdy = threadIdx.y;\n  int i;\n  // curr_act = &theta_s[SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float) + 1 + (2 * LARGEST_LAYER) * (blockDim.x * tdz +\n  // tdx)];\n  curr_act = helper_->getInputLocation(theta_s);\n  // iterate through the part of the state that should be an input to the NN\n  for (i = tdy; i < DYNAMICS_DIM; i += blockDim.y)\n  {\n    curr_act[i] = state[i + (S_DIM - DYNAMICS_DIM)];\n  }\n  // iterate through the control to put into first layer\n  for (i = tdy; i < C_DIM; i += blockDim.y)\n  {\n    curr_act[DYNAMICS_DIM + i] = control[i];\n  }\n  __syncthreads();\n\n  curr_act = helper_->forward(nullptr, theta_s);\n\n  // copies results back into state derivative\n  for (i = tdy; i < DYNAMICS_DIM; i += blockDim.y)\n  {\n    state_der[i + (S_DIM - DYNAMICS_DIM)] = curr_act[i];\n  }\n  __syncthreads();\n}\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nvoid NeuralNetModel<S_DIM, C_DIM, K_DIM>::GPUSetup()\n{\n  PARENT_CLASS* derived = static_cast<PARENT_CLASS*>(this);\n  helper_->GPUSetup();\n  derived->GPUSetup();\n\n  HANDLE_ERROR(cudaMemcpyAsync(&(this->model_d_->helper_), &(this->helper_->network_d_), sizeof(FNNHelper<>*),\n                               cudaMemcpyHostToDevice, this->stream_));\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\n__device__ void 
NeuralNetModel<S_DIM, C_DIM, K_DIM>::initializeDynamics(float* state, float* control, float* output,\n                                                                        float* theta_s, float t_0, float dt)\n{\n  if (output)\n  {\n    for (int i = 0; i < this->OUTPUT_DIM && i < this->STATE_DIM; i++)\n    {\n      output[i] = state[i];\n    }\n  }\n  helper_->initialize(theta_s);\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nNeuralNetModel<S_DIM, C_DIM, K_DIM>::state_array\nNeuralNetModel<S_DIM, C_DIM, K_DIM>::stateFromMap(const std::map<std::string, float>& map)\n{\n  state_array s;\n  s(S_INDEX(POS_X)) = map.at(\"POS_X\");\n  s(S_INDEX(POS_Y)) = map.at(\"POS_Y\");\n  s(S_INDEX(YAW)) = map.at(\"YAW\");\n  s(S_INDEX(ROLL)) = map.at(\"ROLL\");\n  s(S_INDEX(BODY_VEL_X)) = map.at(\"VEL_X\");\n  s(S_INDEX(BODY_VEL_Y)) = map.at(\"VEL_Y\");\n  s(S_INDEX(YAW_RATE)) = map.at(\"OMEGA_Z\");\n  return s;\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\n__host__ __device__ int NeuralNetModel<S_DIM, C_DIM, K_DIM>::getGrdSharedSizeBytes() const\n{\n  return helper_->getGrdSharedSizeBytes();\n}\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\n__host__ __device__ int NeuralNetModel<S_DIM, C_DIM, K_DIM>::getBlkSharedSizeBytes() const\n{\n  return helper_->getBlkSharedSizeBytes();\n}\n"
  },
  {
    "path": "include/mppi/dynamics/autorally/ar_nn_model.cuh",
    "content": "#ifndef AR_NN_DYNAMICS_CUH_\n#define AR_NN_DYNAMICS_CUH_\n\n#include <mppi/dynamics/dynamics.cuh>\n#include <mppi/utils/file_utils.h>\n#include <mppi/utils/nn_helpers/meta_math.h>\n#include <mppi/utils/nn_helpers/fnn_helper.cuh>\n\n#include <Eigen/Dense>\n#include <cuda_runtime.h>\n#include <cnpy.h>\n\n#include <vector>\n/**\n * For AutoRally\n * State x, y, yaw, roll, u_x, u_y, yaw_rate\n * NeuralNetModel<7,2,3,6,32,32,4> model(dt, u_constraint);\n * DYNAMICS_DIM = 4\n */\n\nstruct NNDynamicsParams : public DynamicsParams\n{\n  enum class StateIndex : int\n  {\n    POS_X = 0,\n    POS_Y,\n    YAW,\n    ROLL,\n    BODY_VEL_X,\n    BODY_VEL_Y,\n    YAW_RATE,\n    NUM_STATES\n  };\n  enum class ControlIndex : int\n  {\n    STEERING = 0,\n    THROTTLE,\n    NUM_CONTROLS\n  };\n  enum class OutputIndex : int\n  {\n    POS_X = 0,\n    POS_Y,\n    YAW,\n    ROLL,\n    BODY_VEL_X,\n    BODY_VEL_Y,\n    YAW_RATE,\n    FILLER_1,\n    NUM_OUTPUTS\n  };\n};\n\n/**\n * @file neural_net_model.cuh\n * @author Grady Williams <gradyrw@gmail.com>\n * @date May 24, 2017\n * @copyright 2017 Georgia Institute of Technology\n * @brief Neural Network Model class definition\n * @tparam S_DIM state dimension\n * @tparam C_DIM control dimension\n * @tparam K_DIM dimensions that are ignored from the state, 1 ignores the first, 2 the first and second, etc.\n * For example in AutoRally we want to input the last 4 values of our state (dim 7), so our K is 3\n * If you use the entire state in the NN K should equal 0\n * @tparam layer_args size of the NN layers\n */\n\nusing namespace MPPI_internal;\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nclass NeuralNetModel : public Dynamics<NeuralNetModel<S_DIM, C_DIM, K_DIM>, NNDynamicsParams>\n{\npublic:\n  // TODO remove duplication of calculation of values, pull from the structure\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  using PARENT_CLASS = Dynamics<NeuralNetModel<S_DIM, C_DIM, K_DIM>, NNDynamicsParams>;\n\n  // Define Eigen fixed size 
matrices\n  using state_array = typename PARENT_CLASS::state_array;\n  using control_array = typename PARENT_CLASS::control_array;\n  using output_array = typename PARENT_CLASS::output_array;\n  using dfdx = typename PARENT_CLASS::dfdx;\n  using dfdu = typename PARENT_CLASS::dfdu;\n\n  static const int DYNAMICS_DIM = S_DIM - K_DIM;  ///< number of inputs from state\n\n  NeuralNetModel(cudaStream_t stream = 0);\n  NeuralNetModel(std::array<float2, C_DIM> control_rngs, cudaStream_t stream = 0);\n\n  ~NeuralNetModel();\n\n  std::string getDynamicsModelName() const override\n  {\n    return \"FCN Autorally Model\";\n  }\n\n  __device__ int* getNetStructurePtr()\n  {\n    return helper_->getNetStructurePtr();\n  }\n  __device__ int* getStrideIdcsPtr()\n  {\n    return helper_->getStrideIdcsPtr();\n  }\n  __device__ float* getThetaPtr()\n  {\n    return helper_->getThetaPtr();\n  }\n\n  FNNHelper<>* getHelperPtr()\n  {\n    return helper_;\n  }\n\n  void paramsToDevice();\n\n  __device__ void initializeDynamics(float* state, float* control, float* output, float* theta_s, float t_0, float dt);\n\n  void initializeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                          Eigen::Ref<output_array> output, float t_0, float dt)\n  {\n    PARENT_CLASS::initializeDynamics(state, control, output, t_0, dt);\n  }\n\n  void GPUSetup();\n\n  void freeCudaMem();\n\n  void printState();\n\n  void printParams();\n\n  void loadParams(const std::string& model_path);\n\n  void updateModel(const std::vector<int>& description, const std::vector<float>& data);\n\n  bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B);\n\n  void computeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                       Eigen::Ref<state_array> state_der);\n  void 
computeKinematics(const Eigen::Ref<const state_array>& state, Eigen::Ref<state_array> s_der);\n\n  __device__ void computeDynamics(float* state, float* control, float* state_der, float* theta_s = nullptr);\n  __device__ void computeKinematics(float* state, float* state_der);\n\n  state_array stateFromMap(const std::map<std::string, float>& map) override;\n  __host__ __device__ int getGrdSharedSizeBytes() const;\n  __host__ __device__ int getBlkSharedSizeBytes() const;\n\nprivate:\n  FNNHelper<>* helper_ = nullptr;\n};\n\ntemplate <int S_DIM, int C_DIM, int K_DIM>\nconst int NeuralNetModel<S_DIM, C_DIM, K_DIM>::DYNAMICS_DIM;\n\n#if __CUDACC__\n#include \"ar_nn_model.cu\"\n#endif\n\n#endif  // AR_NN_DYNAMICS_CUH_\n"
  },
  {
    "path": "include/mppi/dynamics/bicycle_slip/bicycle_slip_parametric.cu",
    "content": "//\n// Created by jason on 12/12/22.\n//\n\n#include \"bicycle_slip_parametric.cuh\"\n\ntemplate <class CLASS_T, class PARAMS_T>\nBicycleSlipParametricImpl<CLASS_T, PARAMS_T>::BicycleSlipParametricImpl(cudaStream_t stream) : PARENT_CLASS(stream)\n{\n  normals_tex_helper_ = new TwoDTextureHelper<float4>(1, stream);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid BicycleSlipParametricImpl<CLASS_T, PARAMS_T>::paramsToDevice()\n{\n  normals_tex_helper_->copyToDevice();\n  PARENT_CLASS::paramsToDevice();\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nBicycleSlipParametricImpl<CLASS_T, PARAMS_T>::state_array\nBicycleSlipParametricImpl<CLASS_T, PARAMS_T>::stateFromMap(const std::map<std::string, float>& map)\n{\n  state_array s = state_array::Zero();\n  if (map.find(\"VEL_X\") == map.end() || map.find(\"VEL_Y\") == map.end() || map.find(\"POS_X\") == map.end() ||\n      map.find(\"POS_Y\") == map.end() || map.find(\"ROLL\") == map.end() || map.find(\"PITCH\") == map.end())\n  {\n    std::cout << \"WARNING: could not find all keys for ackerman slip dynamics\" << std::endl;\n    for (const auto& it : map)\n    {\n      std::cout << \"got key \" << it.first << std::endl;\n    }\n    return s;\n  }\n  s(S_INDEX(POS_X)) = map.at(\"POS_X\");\n  s(S_INDEX(POS_Y)) = map.at(\"POS_Y\");\n  s(S_INDEX(VEL_X)) = map.at(\"VEL_X\");\n  s(S_INDEX(VEL_Y)) = map.at(\"VEL_Y\");\n  s(S_INDEX(OMEGA_Z)) = map.at(\"OMEGA_Z\");\n  s(S_INDEX(YAW)) = map.at(\"YAW\");\n  s(S_INDEX(ROLL)) = map.at(\"ROLL\");\n  s(S_INDEX(PITCH)) = map.at(\"PITCH\");\n  if (map.find(\"STEER_ANGLE\") != map.end())\n  {\n    s(S_INDEX(STEER_ANGLE)) = map.at(\"STEER_ANGLE\");\n    s(S_INDEX(STEER_ANGLE_RATE)) = map.at(\"STEER_ANGLE_RATE\");\n  }\n  else\n  {\n    std::cout << \"WARNING: unable to find BRAKE_STATE or STEER_ANGLE_RATE, using 0\" << std::endl;\n    s(S_INDEX(STEER_ANGLE)) = 0;\n    s(S_INDEX(STEER_ANGLE_RATE)) = 0;\n  }\n  if (map.find(\"BRAKE_STATE\") != map.end())\n  {\n    
s(S_INDEX(BRAKE_STATE)) = map.at(\"BRAKE_STATE\");\n  }\n  else if (map.find(\"BRAKE_CMD\") != map.end())\n  {\n    std::cout << \"WARNING: unable to find BRAKE_STATE\" << std::endl;\n    s(S_INDEX(BRAKE_STATE)) = map.at(\"BRAKE_CMD\");\n  }\n  else\n  {\n    std::cout << \"WARNING: unable to find BRAKE_CMD or BRAKE_STATE\" << std::endl;\n    s(S_INDEX(BRAKE_STATE)) = 0;\n  }\n  return s;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid BicycleSlipParametricImpl<CLASS_T, PARAMS_T>::GPUSetup()\n{\n  PARENT_CLASS::GPUSetup();\n\n  normals_tex_helper_->GPUSetup();\n  // makes sure that the device ptr sees the correct texture object\n  HANDLE_ERROR(cudaMemcpyAsync(&(this->model_d_->normals_tex_helper_), &(normals_tex_helper_->ptr_d_),\n                               sizeof(TwoDTextureHelper<float4>*), cudaMemcpyHostToDevice, this->stream_));\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid BicycleSlipParametricImpl<CLASS_T, PARAMS_T>::freeCudaMem()\n{\n  normals_tex_helper_->freeCudaMem();\n  PARENT_CLASS::freeCudaMem();\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid BicycleSlipParametricImpl<CLASS_T, PARAMS_T>::computeDynamics(const Eigen::Ref<const state_array>& state,\n                                                                   const Eigen::Ref<const control_array>& control,\n                                                                   Eigen::Ref<state_array> state_der)\n{\n  state_der = state_array::Zero();\n  bool enable_brake = control[C_INDEX(THROTTLE_BRAKE)] < 0;\n\n  // runs parametric delay model\n  // this->computeParametricDelayDeriv(state, control, state_der);\n\n  // // runs the parametric part of the steering model\n  // // TODO this needs to be the NN\n  // this->computeParametricSteerDeriv(state, control, state_der);\n\n  // // TODO need to predict the brake state change\n\n  // float normal_x_avg, normal_y_avg, normal_z_avg;\n  // RACER::computeBodyFrameNormals<TwoDTextureHelper<float4>>(\n  //     this->normals_tex_helper_, 
state(S_INDEX(YAW)), state(S_INDEX(POS_X)), state(S_INDEX(POS_Y)),\n  //     state(S_INDEX(ROLL)), state(S_INDEX(PITCH)), normal_x_avg, normal_y_avg, normal_z_avg);\n\n  // const float brake = tanh_vel_scale(state(S_INDEX(BRAKE_STATE)), state(S_INDEX(VEL_X)), this->params.c_brake);\n  // const float drag_x = tanh_scale(state(S_INDEX(VEL_X)), this->params.c_rolling);\n  // const float scaled_rpm = state(S_INDEX(ENGINE_RPM)) / 1.0e3 - this->params_.min_rpm;\n  // const float throttle = tanhf(powf(scaled_rpm, 2) * this->params_.c_rpm[2] + scaled_rpm * this->params_.c_rpm[1] +\n  //                              this->params_.c_rpm[0]) *\n  //                        this->params_.rpm_scale;\n\n  // const float x_force = throttle - brake - drag_x;\n\n  // const float wheel_angle = tanf(state(S_INDEX(STEER_ANGLE)) / this->params_.steer_angle_scale);\n  // const float parametric_omega = (state(S_INDEX(VEL_X)) / this->params_.wheel_base) * wheel_angle;\n  // state_der(S_INDEX(OMEGA_Z)) = (parametric_omega - state(S_INDEX(OMEGA_Z))) * this->params_.c_omega -\n  //                               state(S_INDEX(OMEGA_Z)) * this->params_.c_v_omega;\n\n  // const float drag_y = tanh_scale(state(S_INDEX(VEL_Y)), this->params_.c_sliding);\n  // const float y_force =\n  //     tanhf(state(S_INDEX(VEL_X)) * state(S_INDEX(OMEGA_Z)) * this->params_.y_f_c[0]) * this->params_.y_f_c[1] -\n  //     drag_y;\n\n  // float wheel_angle, sin_wheel_angle, cos_wheel_angle;\n  // sincosf(wheel_angle, &sin_wheel_angle, &cos_wheel_angle);\n\n  // state_der(S_INDEX(VEL_X)) = (x_force + x_force * cos_wheel_angle - y_force * sin_wheel_angle) / this->params_.mass\n  // -\n  //                             state(S_INDEX(VEL_X)) * this->params_.c_vx - normal_x_avg * this->params_.gravity_x +\n  //                             state(S_INDEX(VEL_Y)) * state(S_INDEX(OMEGA_Z));\n\n  // state_der(S_INDEX(VEL_Y)) = (y_force + y_force * cos_wheel_angle + x_force * sin_wheel_angle) / this->params_.mass\n  // -\n  // 
                            state(S_INDEX(VEL_Y)) * this->params_.c_vy - normal_y_avg * this->params_.gravity_y -\n  //                             state(S_INDEX(VEL_X)) * state(S_INDEX(OMEGA_Z));\n\n  // // kinematics component\n  // state_der(S_INDEX(YAW)) = state(S_INDEX(OMEGA_Z));\n  // float yaw, sin_yaw, cos_yaw;\n  // yaw = state[S_INDEX(YAW)];\n  // sincosf(yaw, &sin_yaw, &cos_yaw);\n  // state_der(S_INDEX(POS_X)) = state(S_INDEX(VEL_X)) * cos_yaw - state(S_INDEX(VEL_Y)) * sin_yaw;\n  // state_der(S_INDEX(POS_Y)) = state(S_INDEX(VEL_X)) * sin_yaw + state(S_INDEX(VEL_Y)) * cos_yaw;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid BicycleSlipParametricImpl<CLASS_T, PARAMS_T>::updateState(const Eigen::Ref<const state_array> state,\n                                                               Eigen::Ref<state_array> next_state,\n                                                               Eigen::Ref<state_array> state_der, const float dt)\n{\n  next_state = state + state_der * dt;\n  next_state(S_INDEX(YAW)) = angle_utils::normalizeAngle(next_state(S_INDEX(YAW)));\n  next_state(S_INDEX(STEER_ANGLE)) =\n      max(min(next_state(S_INDEX(STEER_ANGLE)), this->params_.max_steer_angle), -this->params_.max_steer_angle);\n  next_state(S_INDEX(STEER_ANGLE_RATE)) = state_der(S_INDEX(STEER_ANGLE));\n  next_state(S_INDEX(BRAKE_STATE)) =\n      min(max(next_state(S_INDEX(BRAKE_STATE)), 0.0f), -this->control_rngs_[C_INDEX(THROTTLE_BRAKE)].x);\n  next_state(S_INDEX(ROLL)) = state(S_INDEX(ROLL));\n  next_state(S_INDEX(PITCH)) = state(S_INDEX(PITCH));\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid BicycleSlipParametricImpl<CLASS_T, PARAMS_T>::step(Eigen::Ref<state_array> state,\n                                                        Eigen::Ref<state_array> next_state,\n                                                        Eigen::Ref<state_array> state_der,\n                                                        const Eigen::Ref<const control_array>& control,\n 
                                                       Eigen::Ref<output_array> output, const float t, const float dt)\n{\n  computeDynamics(state, control, state_der);\n  updateState(state, next_state, state_der, dt);\n\n#ifdef BICYCLE_UNCERTAINTY\n  typename PARENT_CLASS::SharedBlock sb;\n#endif\n\n  output = output_array::Zero();\n\n#ifdef BICYCLE_UNCERTAINTY\n  PARENT_CLASS::computeUncertaintyPropagation(state.data(), control.data(), state_der.data(), next_state.data(), dt,\n                                              &this->params_, &sb, nullptr);\n#endif\n\n  float roll = state(S_INDEX(ROLL));\n  float pitch = state(S_INDEX(PITCH));\n  RACER::computeStaticSettling<typename DYN_PARAMS_T::OutputIndex, TwoDTextureHelper<float>>(\n      this->tex_helper_, next_state(S_INDEX(YAW)), next_state(S_INDEX(POS_X)), next_state(S_INDEX(POS_Y)), roll, pitch,\n      output.data());\n  next_state[S_INDEX(PITCH)] = pitch;\n  next_state[S_INDEX(ROLL)] = roll;\n\n  this->setOutputs(state_der.data(), next_state.data(), output.data());\n\n  output[O_INDEX(BASELINK_VEL_B_Y)] = next_state[S_INDEX(VEL_Y)];\n  output[O_INDEX(ACCEL_Y)] = state_der[S_INDEX(VEL_Y)];\n  output[O_INDEX(OMEGA_Z)] = next_state[S_INDEX(OMEGA_Z)];\n  output[O_INDEX(TOTAL_VELOCITY)] =\n      mppi::math::sign(next_state(S_INDEX(VEL_X))) * sqrt(next_state[S_INDEX(VEL_X)] * next_state[S_INDEX(VEL_X)] +\n                                                          next_state[S_INDEX(VEL_Y)] * next_state[S_INDEX(VEL_Y)]);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void BicycleSlipParametricImpl<CLASS_T, PARAMS_T>::updateState(float* state, float* next_state,\n                                                                          float* state_der, const float dt,\n                                                                          typename PARENT_CLASS::DYN_PARAMS_T* params_p)\n{\n  for (int i = threadIdx.y; i < 8; i += blockDim.y)\n  {\n    next_state[i] = state[i] + state_der[i] * dt;\n    
switch (i)\n    {\n      case S_INDEX(YAW):\n        next_state[i] = angle_utils::normalizeAngle(next_state[i]);\n        break;\n      case S_INDEX(STEER_ANGLE):\n        next_state[S_INDEX(STEER_ANGLE)] =\n            max(min(next_state[S_INDEX(STEER_ANGLE)], params_p->max_steer_angle), -params_p->max_steer_angle);\n        next_state[S_INDEX(STEER_ANGLE_RATE)] = state_der[S_INDEX(STEER_ANGLE)];\n        break;\n      case S_INDEX(BRAKE_STATE):\n        next_state[S_INDEX(BRAKE_STATE)] =\n            min(max(next_state[S_INDEX(BRAKE_STATE)], 0.0f), -this->control_rngs_[C_INDEX(THROTTLE_BRAKE)].x);\n    }\n  }\n\n  __syncthreads();\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void BicycleSlipParametricImpl<CLASS_T, PARAMS_T>::computeDynamics(float* state, float* control,\n                                                                              float* state_der, float* theta)\n{\n  DYN_PARAMS_T* params_p = nullptr;\n\n  if (PARENT_CLASS::SHARED_MEM_REQUEST_GRD_BYTES != 0)\n  {  // Allows us to turn on or off global or shared memory version of params\n    params_p = (DYN_PARAMS_T*)theta;\n  }\n  else\n  {\n    params_p = &(this->params_);\n  }\n\n  // parametric part of the brake\n  this->computeParametricDelayDeriv(state, control, state_der, params_p);\n\n  // runs the parametric part of the steering model\n  this->computeParametricSteerDeriv(state, control, state_der, params_p);\n\n  // nullptr if not shared memory\n  // bool enable_brake = control[C_INDEX(THROTTLE_BRAKE)] < 0;\n  // const float brake_cmd = -enable_brake * control[C_INDEX(THROTTLE_BRAKE)];\n  // float linear_brake_slope = params_p->c_b[1] / (0.9f * (2.0f / 0.02));\n  // int index = (fabsf(state[S_INDEX(VEL_X)]) > linear_brake_slope && fabsf(state[S_INDEX(VEL_X)]) <= 3.0f) +\n  //             (fabsf(state[S_INDEX(VEL_X)]) > 3.0f) * 2;\n  // float throttle = params_p->c_t[index] * control[C_INDEX(THROTTLE_BRAKE)] * params_p->gear_sign;\n\n  // // calculates the average normals\n  
// float normal_x_avg, normal_y_avg, normal_z_avg;\n  // RACER::computeBodyFrameNormals<TwoDTextureHelper<float4>>(\n  //     this->normals_tex_helper_, state[S_INDEX(YAW)], state[S_INDEX(POS_X)], state[S_INDEX(POS_Y)],\n  //     state[S_INDEX(ROLL)], state[S_INDEX(PITCH)], normal_x_avg, normal_y_avg, normal_z_avg);\n  // const float gravity_x_accel = act_func::tanhshrink_scale(normal_x_avg, params_p->min_normal_x) *\n  // params_p->gravity_x; const float gravity_y_accel = act_func::tanhshrink_scale(normal_y_avg, params_p->min_normal_y)\n  // * params_p->gravity_y;\n\n  // const float brake_vel = mppi::math::clamp(state[S_INDEX(VEL_X)], -params_p->brake_vel, params_p->brake_vel);\n  // const float rolling_vel =\n  //     mppi::math::clamp(state[S_INDEX(VEL_X)], -params_p->max_roll_resistance_vel,\n  //     params_p->max_roll_resistance_vel);\n  // const float sliding_vel = mppi::math::clamp(state[S_INDEX(VEL_Y)], -params_p->max_slide_vel,\n  // params_p->max_slide_vel);\n\n  // const float brake = params_p->c_b[0] * state[S_INDEX(BRAKE_STATE)] *\n  //                     mppi::math::clamp(state[S_INDEX(VEL_X)], -params_p->brake_vel, params_p->brake_vel);\n  // const float x_drag = params_p->c_v[0] * state[S_INDEX(VEL_X)] + rolling_vel * normal_z_avg * params_p->c_rolling;\n  // const float y_vel_comp = state[S_INDEX(VEL_Y)] * state[S_INDEX(OMEGA_Z)];\n  // const float accel_x = throttle - brake - x_drag;\n  // const float mu_actual = (params_p->mu + params_p->environment * params_p->mu_env) * normal_z_avg;\n  // state_der[S_INDEX(VEL_X)] = mppi::math::clamp(accel_x, -mu_actual, mu_actual) - gravity_x_accel + y_vel_comp;\n\n  // float y_accel = -state[S_INDEX(VEL_X)] * state[S_INDEX(OMEGA_Z)] +\n  //                 mppi::math::sign(state[S_INDEX(VEL_X)]) * state[S_INDEX(OMEGA_Z)] * params_p->vy_omega;\n  // const float drag_y = params_p->c_vy * state[S_INDEX(VEL_Y)] + sliding_vel * normal_z_avg * params_p->c_sliding;\n  // state_der[S_INDEX(VEL_Y)] = y_accel - 
drag_y - gravity_y_accel;\n\n  // const float parametric_omega =\n  //     (state[S_INDEX(VEL_X)] / params_p->wheel_base) * tanf(state[S_INDEX(STEER_ANGLE)] /\n  //     params_p->steer_angle_scale);\n  // state_der[S_INDEX(OMEGA_Z)] =\n  //     (parametric_omega - state[S_INDEX(OMEGA_Z)]) * params_p->c_omega - state[S_INDEX(OMEGA_Z)] *\n  //     params_p->c_v_omega;\n\n  // // kinematics component\n  // state_der[S_INDEX(YAW)] = state[S_INDEX(OMEGA_Z)];\n  // float yaw, sin_yaw, cos_yaw;\n  // yaw = angle_utils::normalizeAngle(state[S_INDEX(YAW)]);\n  // __sincosf(yaw, &sin_yaw, &cos_yaw);\n  // state_der[S_INDEX(POS_X)] = state[S_INDEX(VEL_X)] * cos_yaw - state[S_INDEX(VEL_Y)] * sin_yaw;\n  // state_der[S_INDEX(POS_Y)] = state[S_INDEX(VEL_X)] * sin_yaw + state[S_INDEX(VEL_Y)] * cos_yaw;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void BicycleSlipParametricImpl<CLASS_T, PARAMS_T>::step(float* state, float* next_state, float* state_der,\n                                                                   float* control, float* output, float* theta_s,\n                                                                   const float t, const float dt)\n{\n  DYN_PARAMS_T* params_p;\n#ifdef BICYCLE_UNCERTAINTY\n  typename PARENT_CLASS::SharedBlock *sb_mem, *sb;\n#endif\n  if (PARENT_CLASS::SHARED_MEM_REQUEST_GRD_BYTES != 0)\n  {  // Allows us to turn on or off global or shared memory version of params\n    params_p = (DYN_PARAMS_T*)theta_s;\n  }\n  else\n  {\n    params_p = &(this->params_);\n  }\n#ifdef BICYCLE_UNCERTAINTY\n  if (this->SHARED_MEM_REQUEST_BLK_BYTES != 0)\n  {\n    sb_mem = (typename PARENT_CLASS::SharedBlock*)&theta_s[mppi::math::int_multiple_const(\n                                                               this->SHARED_MEM_REQUEST_GRD_BYTES, sizeof(float4)) /\n                                                           sizeof(float)];\n    sb = &sb_mem[threadIdx.x + blockDim.x * threadIdx.z];\n  }\n#endif\n  const uint tdy = 
threadIdx.y;\n\n  computeDynamics(state, control, state_der, theta_s);\n  updateState(state, next_state, state_der, dt, params_p);\n#ifdef BICYCLE_UNCERTAINTY\n  PARENT_CLASS::computeUncertaintyPropagation(state, control, state_der, next_state, dt, params_p, sb, theta_s);\n#endif\n\n  if (tdy == 0)\n  {\n    float roll = state[S_INDEX(ROLL)];\n    float pitch = state[S_INDEX(PITCH)];\n    RACER::computeStaticSettling<DYN_PARAMS_T::OutputIndex, TwoDTextureHelper<float>>(\n        this->tex_helper_, next_state[S_INDEX(YAW)], next_state[S_INDEX(POS_X)], next_state[S_INDEX(POS_Y)], roll,\n        pitch, output);\n    next_state[S_INDEX(PITCH)] = pitch;\n    next_state[S_INDEX(ROLL)] = roll;\n  }\n  __syncthreads();\n  this->setOutputs(state_der, next_state, output);\n\n  // Rather than need a syncthreads, just overwrite the given outputs using the proper thread\n  if (tdy == O_INDEX(BASELINK_VEL_B_Y) % blockIdx.y)\n  {\n    output[O_INDEX(BASELINK_VEL_B_Y)] = next_state[S_INDEX(VEL_Y)];\n  }\n  if (tdy == O_INDEX(ACCEL_Y) % blockIdx.y)\n  {\n    output[O_INDEX(ACCEL_Y)] = state_der[S_INDEX(VEL_Y)];\n  }\n  if (tdy == O_INDEX(OMEGA_Z) % blockIdx.y)\n  {\n    output[O_INDEX(OMEGA_Z)] = next_state[S_INDEX(OMEGA_Z)];\n  }\n  if (tdy == O_INDEX(TOTAL_VELOCITY) % blockIdx.y)\n  {\n    const float vel_x_sign = output[O_INDEX(TOTAL_VELOCITY)] =\n        mppi::math::sign(next_state[S_INDEX(VEL_X)]) * sqrtf(next_state[S_INDEX(VEL_X)] * next_state[S_INDEX(VEL_X)] +\n                                                             next_state[S_INDEX(VEL_Y)] * next_state[S_INDEX(VEL_Y)]);\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nEigen::Vector3f\nBicycleSlipParametricImpl<CLASS_T, PARAMS_T>::velocityFromState(const Eigen::Ref<const state_array>& state)\n{\n  return Eigen::Vector3f(state[S_INDEX(VEL_X)], state(S_INDEX(VEL_Y)), 0);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nEigen::Vector3f\nBicycleSlipParametricImpl<CLASS_T, PARAMS_T>::angularRateFromState(const 
Eigen::Ref<const state_array>& state)\n{\n  return Eigen::Vector3f(0, 0, state[S_INDEX(OMEGA_Z)]);\n}\n\ntemplate <class TEX_T>\n__device__ __host__ void RACER::computeBodyFrameNormals(TEX_T* tex_helper, const float yaw, const float x,\n                                                        const float y, const float roll, const float pitch,\n                                                        float& mean_normals_x, float& mean_normals_y,\n                                                        float& mean_normals_z)\n{\n  float3 front_left = make_float3(2.981f, 0.737f, 0.0f);\n  float3 front_right = make_float3(2.981f, -0.737f, 0.f);\n  float3 rear_left = make_float3(0.0f, 0.737f, 0.0f);\n  float3 rear_right = make_float3(0.0f, -0.737f, 0.0f);\n  float3 body_pose = make_float3(x, y, 0.0f);\n  float3 rotation = make_float3(roll, pitch, yaw);\n\n  if (tex_helper->checkTextureUse(0))\n  {\n    float4 front_left_normals = tex_helper->queryTextureAtWorldOffsetPose(0, body_pose, front_left, rotation);\n    float4 front_right_normals = tex_helper->queryTextureAtWorldOffsetPose(0, body_pose, front_right, rotation);\n    float4 rear_left_normals = tex_helper->queryTextureAtWorldOffsetPose(0, body_pose, rear_left, rotation);\n    float4 rear_right_normals = tex_helper->queryTextureAtWorldOffsetPose(0, body_pose, rear_right, rotation);\n\n    // TODO need to rotate the normals for x,y\n\n#ifdef __CUDA_ARCH__\n    const float cos_yaw = __cosf(yaw);\n    const float sin_yaw = __sinf(yaw);\n#else\n    const float cos_yaw = cosf(yaw);\n    const float sin_yaw = sinf(yaw);\n#endif\n\n    float3 front_left_normals_rot;\n    front_left_normals_rot.x = cos_yaw * front_left_normals.x - sin_yaw * front_left_normals.y;\n    front_left_normals_rot.y = sin_yaw * front_left_normals.x + cos_yaw * front_left_normals.y;\n    front_left_normals_rot.z = front_left_normals.z;\n\n    float3 front_right_normals_rot;\n    front_right_normals_rot.x = cos_yaw * front_right_normals.x - sin_yaw * 
front_right_normals.y;\n    front_right_normals_rot.y = sin_yaw * front_right_normals.x + cos_yaw * front_right_normals.y;\n    front_right_normals_rot.z = front_right_normals.z;\n\n    float3 rear_left_normals_rot;\n    rear_left_normals_rot.x = cos_yaw * rear_left_normals.x - sin_yaw * rear_left_normals.y;\n    rear_left_normals_rot.y = sin_yaw * rear_left_normals.x + cos_yaw * rear_left_normals.y;\n    rear_left_normals_rot.z = rear_left_normals.z;\n\n    float3 rear_right_normals_rot;\n    rear_right_normals_rot.x = cos_yaw * rear_right_normals.x - sin_yaw * rear_right_normals.y;\n    rear_right_normals_rot.y = sin_yaw * rear_right_normals.x + cos_yaw * rear_right_normals.y;\n    rear_right_normals_rot.z = rear_right_normals.z;\n\n    mean_normals_x =\n        (front_left_normals_rot.x + front_right_normals_rot.x + rear_right_normals_rot.x + rear_left_normals_rot.x) /\n        4.0f;\n    mean_normals_y =\n        (front_left_normals_rot.y + front_right_normals_rot.y + rear_right_normals_rot.y + rear_left_normals_rot.y) /\n        4.0f;\n    mean_normals_z =\n        (front_left_normals_rot.z + front_right_normals_rot.z + rear_right_normals_rot.z + rear_left_normals_rot.z) /\n        4.0f;\n\n    // using 2pi so any rotation that accidently uses this will be using identity\n    if (!isfinite(mean_normals_x) || !isfinite(mean_normals_y) || !isfinite(mean_normals_z))\n    {\n      mean_normals_x = 0.0f;\n      mean_normals_y = 0.0f;\n      mean_normals_z = 1.0f;\n    }\n  }\n  else\n  {\n    mean_normals_x = 0.0f;\n    mean_normals_y = 0.0f;\n    mean_normals_z = 1.0f;\n  }\n}\n\n#ifdef BICYCLE_UNCERTAINTY\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ bool BicycleSlipParametricImpl<CLASS_T, PARAMS_T>::computeUncertaintyJacobian(const float* state,\n                                                                                                  const float* control,\n                                                                                  
                float* A,\n                                                                                                  PARAMS_T* params_p)\n{\n  float sin_yaw, cos_yaw, cos_2_delta;\n#ifdef __CUDA_ARCH__\n  float yaw_norm = angle_utils::normalizeAngle(state[S_INDEX(YAW)]);\n  float delta = angle_utils::normalizeAngle(state[S_INDEX(STEER_ANGLE)] / params_p->steer_angle_scale);\n  __sincosf(yaw_norm, &sin_yaw, &cos_yaw);\n  cos_2_delta = __cosf(delta) * __cosf(delta);\n#else\n  sincosf(state[S_INDEX(YAW)], &sin_yaw, &cos_yaw);\n  float delta = state[S_INDEX(STEER_ANGLE)] / params_p->steer_angle_scale;\n  cos_2_delta = cosf(delta) * cosf(delta);\n#endif\n\n  int step, pi;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_Y>(pi, step);\n  // A = df/dx + df/du * K\n  for (int i = pi; i < this->UNCERTAINTY_DIM * this->UNCERTAINTY_DIM; i += step)\n  {\n    switch (i)\n    {\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(VEL_X), this->UNCERTAINTY_DIM):\n        A[i] = -params_p->c_v[0] - params_p->K_vel_x;\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(YAW), this->UNCERTAINTY_DIM):\n        A[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(POS_X), this->UNCERTAINTY_DIM):\n        A[i] = -params_p->K_x * cos_yaw;\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(POS_Y), this->UNCERTAINTY_DIM):\n        A[i] = -params_p->K_x * sin_yaw;\n        break;\n\n      // yaw\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(VEL_X), this->UNCERTAINTY_DIM):\n        A[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(YAW), this->UNCERTAINTY_DIM):\n        A[i] = -fabsf(state[S_INDEX(VEL_X)]) * params_p->K_yaw / (params_p->wheel_base * cos_2_delta);\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(POS_X), this->UNCERTAINTY_DIM):\n        A[i] = state[S_INDEX(VEL_X)] * params_p->K_y * sin_yaw / (params_p->wheel_base * 
cos_2_delta);\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(POS_Y), this->UNCERTAINTY_DIM):\n        A[i] = -state[S_INDEX(VEL_X)] * params_p->K_y * cos_yaw / (params_p->wheel_base * cos_2_delta);\n        break;\n      // pos x\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(VEL_X), this->UNCERTAINTY_DIM):\n        A[i] = cos_yaw;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(YAW), this->UNCERTAINTY_DIM):\n        A[i] = -sin_yaw * state[S_INDEX(VEL_X)] - cos_yaw * state[S_INDEX(VEL_Y)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_X), this->UNCERTAINTY_DIM):\n        A[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_Y), this->UNCERTAINTY_DIM):\n        A[i] = 0.0f;\n        break;\n      // pos y\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(VEL_X), this->UNCERTAINTY_DIM):\n        A[i] = sin_yaw;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(YAW), this->UNCERTAINTY_DIM):\n        A[i] = cos_yaw * state[S_INDEX(VEL_X)] - sin_yaw * state[S_INDEX(VEL_Y)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_Y), this->UNCERTAINTY_DIM):\n        A[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_X), this->UNCERTAINTY_DIM):\n        A[i] = 0.0f;\n        break;\n    }\n  }\n  return true;\n}\n#endif\n"
  },
  {
    "path": "include/mppi/dynamics/bicycle_slip/bicycle_slip_parametric.cuh",
    "content": "//\n// Created by jason on 12/12/22.\n//\n\n#ifndef MPPIGENERIC_BICYCLE_SLIP_PARAMTERIC_CUH\n#define MPPIGENERIC_BICYCLE_SLIP_PARAMTERIC_CUH\n\n#include <mppi/utils/angle_utils.cuh>\n#include <mppi/utils/math_utils.h>\n#include <mppi/utils/texture_helpers/two_d_texture_helper.cuh>\n#include <mppi/utils/activation_functions.cuh>\n#include <mppi/dynamics/racer_dubins/racer_dubins_elevation.cuh>\n\n#define BICYCLE_UNCERTAINTY\n\nnamespace RACER\n{\ntemplate <class TEX_T>\n__device__ __host__ void computeBodyFrameNormals(TEX_T* tex_helper, const float yaw, const float x, const float y,\n                                                 const float roll, const float pitch, float& mean_normals_x,\n                                                 float& mean_normals_y, float& mean_normals_z);\n};\n\nstruct BicycleSlipParametricParams : public RacerDubinsElevationParams\n{\n  enum class StateIndex : int\n  {\n    POS_X = 0,\n    POS_Y,\n    YAW,\n    STEER_ANGLE,\n    BRAKE_STATE,\n    VEL_X,\n    VEL_Y,\n    OMEGA_Z,\n    ROLL,\n    PITCH,\n    STEER_ANGLE_RATE,\n    ENGINE_RPM,  // TODO: Figure out if more filler is necessary\n    UNCERTAINTY_POS_X,\n    UNCERTAINTY_POS_Y,\n    UNCERTAINTY_YAW,\n    UNCERTAINTY_VEL_X,\n    UNCERTAINTY_POS_X_Y,\n    UNCERTAINTY_POS_X_YAW,\n    UNCERTAINTY_POS_X_VEL_X,\n    UNCERTAINTY_POS_Y_YAW,\n    UNCERTAINTY_POS_Y_VEL_X,\n    UNCERTAINTY_YAW_VEL_X,\n    NUM_STATES\n  };\n\n  float gravity_x = -3.9f;\n  float gravity_y = -7.2f;\n  float mass = 0.61;\n  // vx\n  float min_rpm = 1.1f;\n  float rpm_scale = 1.3;\n  float c_rolling[2] = { 0.3f, 0.4f };\n  float c_rpm[3] = {0.01f, 0.066f, 0.02f};\n  float c_vx = 0.12f;\n  float c_brake[2] = {0.4f, 4.23f};\n  // vy\n  float c_vy = 0.12f;\n  float y_f_c[2] = { 0.9f, 0.8f };\n  float c_sliding[2] = { 1.5f, 1.7f };\n\n  float c_omega = 2.2f;\n  float c_v_omega = 4.2f;\n\n  BicycleSlipParametricParams()\n  {\n    c_t[0] = 2.73f;\n    c_t[1] = 0.15f;\n    c_t[2] = -0.0145f;\n\n    
c_b[0] = 30.0f;\n    c_v[0] = 0.079f;  // c_vx\n  }\n};\n\ntemplate <class CLASS_T, class PARAMS_T>\nclass BicycleSlipParametricImpl : public RacerDubinsElevationImpl<CLASS_T, PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = RacerDubinsElevationImpl<CLASS_T, PARAMS_T>;\n\n  typedef PARAMS_T DYN_PARAMS_T;\n\n  // static const int SHARED_MEM_REQUEST_GRD_BYTES = sizeof(PARAMS_T);  // TODO set to one to prevent array of size 0 error\n  // static const int SHARED_MEM_REQUEST_BLK_BYTES = PARENT_CLASS::SHARED_MEM_REQUEST_BLK_BYTES;\n\n  typedef typename PARENT_CLASS::state_array state_array;\n  typedef typename PARENT_CLASS::control_array control_array;\n  typedef typename PARENT_CLASS::output_array output_array;\n  typedef typename PARENT_CLASS::dfdx dfdx;\n  typedef typename PARENT_CLASS::dfdu dfdu;\n\n  BicycleSlipParametricImpl(cudaStream_t stream = nullptr);\n  BicycleSlipParametricImpl(const std::string& model_path, cudaStream_t stream = nullptr);\n\n  std::string getDynamicsModelName() const override\n  {\n    return \"Bicycle Slip Parametric Model\";\n  }\n\n  void GPUSetup();\n\n  void freeCudaMem();\n\n  void computeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                       Eigen::Ref<state_array> state_der);\n\n  void step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state, Eigen::Ref<state_array> state_der,\n            const Eigen::Ref<const control_array>& control, Eigen::Ref<output_array> output, const float t,\n            const float dt);\n\n  void updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                   Eigen::Ref<state_array> state_der, const float dt);\n\n  __device__ void updateState(float* state, float* next_state, float* state_der, const float dt)\n  {\n  }\n  __device__ void updateState(float* state, float* next_state, float* state_der, const float dt,\n                              typename PARENT_CLASS::DYN_PARAMS_T* 
params_p);\n  __device__ void computeDynamics(float* state, float* control, float* state_der, float* theta = nullptr);\n  __device__ inline void step(float* state, float* next_state, float* state_der, float* control, float* output,\n                              float* theta_s, const float t, const float dt);\n#ifdef BICYCLE_UNCERTAINTY\n  __host__ __device__ bool computeUncertaintyJacobian(const float* state, const float* control, float* A,\n                                                      PARAMS_T* params_p);\n#endif\n\n  void paramsToDevice();\n\n  // state_array interpolateState(const Eigen::Ref<state_array> state_1, const Eigen::Ref<state_array> state_2,\n  //                              const float alpha);\n\n  // bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n  //                  Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B);\n\n  Eigen::Vector3f velocityFromState(const Eigen::Ref<const state_array>& state);\n  Eigen::Vector3f angularRateFromState(const Eigen::Ref<const state_array>& state);\n\n  // void enforceLeash(const Eigen::Ref<const state_array>& state_true, const Eigen::Ref<const state_array>&\n  // state_nominal,\n  //                   const Eigen::Ref<const state_array>& leash_values, Eigen::Ref<state_array> state_output);\n\n  state_array stateFromMap(const std::map<std::string, float>& map) override;\n\n  TwoDTextureHelper<float4>* getTextureHelperNormals()\n  {\n    return normals_tex_helper_;\n  }\n\n  void updateRotation(std::array<float3, 3>& rotation)\n  {\n    this->tex_helper_->updateRotation(0, rotation);\n    this->normals_tex_helper_->updateRotation(0, rotation);\n    this->normals_tex_helper_->updateRotation(1, rotation);\n    this->normals_tex_helper_->updateRotation(2, rotation);\n  }\n\nprotected:\n  TwoDTextureHelper<float4>* normals_tex_helper_ = nullptr;\n};\n\nclass BicycleSlipParametric : public BicycleSlipParametricImpl<BicycleSlipParametric, 
BicycleSlipParametricParams>\n{\npublic:\n  BicycleSlipParametric(cudaStream_t stream = nullptr)\n    : BicycleSlipParametricImpl<BicycleSlipParametric, BicycleSlipParametricParams>(stream)\n  {\n  }\n  BicycleSlipParametric(const std::string& model_path, cudaStream_t stream = nullptr)\n    : BicycleSlipParametricImpl<BicycleSlipParametric, BicycleSlipParametricParams>(model_path, stream)\n  {\n  }\n};\n\n#if __CUDACC__\n#include \"bicycle_slip_parametric.cu\"\n#endif\n\n#endif  // MPPIGENERIC_BICYCLE_SLIP_PARAMTERIC_CUH\n"
  },
  {
    "path": "include/mppi/dynamics/cartpole/cartpole_dynamics.cu",
    "content": "#include <mppi/dynamics/cartpole/cartpole_dynamics.cuh>\n#include <mppi/utils/math_utils.h>\n\nCartpoleDynamics::CartpoleDynamics(float cart_mass, float pole_mass, float pole_length, cudaStream_t stream)\n  : Dynamics<CartpoleDynamics, CartpoleDynamicsParams>(stream)\n{\n  this->params_ = CartpoleDynamicsParams(cart_mass, pole_mass, pole_length);\n}\n\nbool CartpoleDynamics::computeGrad(const Eigen::Ref<const state_array>& state,\n                                   const Eigen::Ref<const control_array>& control, Eigen::Ref<dfdx> A,\n                                   Eigen::Ref<dfdu> B)\n{\n  float theta = state(2);\n  float theta_dot = state(3);\n  float force = control(0);\n\n  A(0, 1) = 1.0;\n  A(1, 2) =\n      (this->params_.pole_mass * cosf(theta) * (this->params_.pole_length * SQ(theta_dot) + gravity_ * cosf(theta)) -\n       gravity_ * this->params_.pole_mass * SQ(sinf(theta))) /\n          (this->params_.cart_mass + this->params_.pole_mass * SQ(sinf(theta))) -\n      (2 * this->params_.pole_mass * cosf(theta) * sinf(theta) *\n       (force +\n        this->params_.pole_mass * sinf(theta) * (this->params_.pole_length * SQ(theta_dot) + gravity_ * cosf(theta)))) /\n          powf((this->params_.cart_mass + this->params_.pole_mass * SQ(sinf(theta))), 2.0);\n  A(1, 3) = (2 * this->params_.pole_length * this->params_.pole_mass * theta_dot * sinf(theta)) /\n            (this->params_.cart_mass + this->params_.pole_mass * SQ(sinf(theta)));\n  A(2, 3) = 1.0;\n  A(3, 2) =\n      (force * sinf(theta) - gravity_ * cosf(theta) * (this->params_.pole_mass + this->params_.cart_mass) -\n       this->params_.pole_length * this->params_.pole_mass * SQ(theta_dot) * SQ(cosf(theta)) +\n       this->params_.pole_length * this->params_.pole_mass * SQ(theta_dot) * SQ(sinf(theta))) /\n          (this->params_.pole_length * (this->params_.cart_mass + this->params_.pole_mass * SQ(sinf(theta)))) +\n      (2 * this->params_.pole_mass * cosf(theta) * sinf(theta) *\n       
(this->params_.pole_length * this->params_.pole_mass * cosf(theta) * sinf(theta) * SQ(theta_dot) +\n        force * cosf(theta) + gravity_ * sinf(theta) * (this->params_.pole_mass + this->params_.cart_mass))) /\n          powf(this->params_.pole_length * (this->params_.cart_mass + this->params_.pole_mass * SQ(sinf(theta))), 2.0);\n  A(3, 3) = -(2 * this->params_.pole_mass * theta_dot * cosf(theta) * sinf(theta)) /\n            (this->params_.cart_mass + this->params_.pole_mass * SQ(sinf(theta)));\n\n  B(1, 0) = 1 / (this->params_.cart_mass + this->params_.pole_mass * SQ(sinf(theta)));\n  B(3, 0) = -cosf(theta) /\n            (this->params_.pole_length * (this->params_.cart_mass + this->params_.pole_mass * SQ(sinf(theta))));\n  return true;\n}\n\nvoid CartpoleDynamics::computeDynamics(const Eigen::Ref<const state_array>& state,\n                                       const Eigen::Ref<const control_array>& control,\n                                       Eigen::Ref<state_array> state_der)\n{\n  const float sin_theta = sinf(state(S_INDEX(THETA)));\n  const float cos_theta = cosf(state(S_INDEX(THETA)));\n  float theta_dot = state[S_INDEX(THETA_DOT)];\n  float force = control[C_INDEX(FORCE)];\n  const float m_c = this->params_.cart_mass;\n  const float m_p = this->params_.pole_mass;\n  const float l_p = this->params_.pole_length;\n\n  state_der(S_INDEX(POS_X)) = state(S_INDEX(VEL_X));\n  state_der(S_INDEX(VEL_X)) =\n      (force + m_p * sin_theta * (l_p * SQ(theta_dot) + gravity_ * cos_theta)) / (m_c + m_p * SQ(sin_theta));\n  state_der(S_INDEX(THETA)) = theta_dot;\n  state_der(S_INDEX(THETA_DOT)) =\n      (-force * cos_theta - m_p * l_p * SQ(theta_dot) * cos_theta * sin_theta - (m_c + m_p) * gravity_ * sin_theta) /\n      (l_p * (m_c + m_p * SQ(sin_theta)));\n}\n\nvoid CartpoleDynamics::printState(const Eigen::Ref<const state_array>& state)\n{\n  printf(\"Cart position: %f; Cart velocity: %f; Pole angle: %f; Pole rate: %f \\n\", state(0), state(1), state(2),\n         
state(3));  // Needs to be completed\n}\n\nvoid CartpoleDynamics::printState(float* state)\n{\n  printf(\"Cart position: %f; Cart velocity: %f; Pole angle: %f; Pole rate: %f \\n\", state[0], state[1], state[2],\n         state[3]);  // Needs to be completed\n}\n\nvoid CartpoleDynamics::printParams()\n{\n  printf(\"Cart mass: %f; Pole mass: %f; Pole length: %f \\n\", this->params_.cart_mass, this->params_.pole_mass,\n         this->params_.pole_length);\n}\n\n__device__ void CartpoleDynamics::computeDynamics(float* state, float* control, float* state_der, float* theta_s)\n{\n  float theta = angle_utils::normalizeAngle(state[S_INDEX(THETA)]);\n  const float sin_theta = __sinf(theta);\n  const float cos_theta = __cosf(theta);\n  float theta_dot = state[S_INDEX(THETA_DOT)];\n  float force = control[C_INDEX(FORCE)];\n  const float m_c = this->params_.cart_mass;\n  const float m_p = this->params_.pole_mass;\n  const float l_p = this->params_.pole_length;\n\n  state_der[S_INDEX(POS_X)] = state[S_INDEX(VEL_X)];\n  state_der[S_INDEX(VEL_X)] =\n      (force + m_p * sin_theta * (l_p * SQ(theta_dot) + gravity_ * cos_theta)) / (m_c + m_p * SQ(sin_theta));\n  state_der[S_INDEX(THETA)] = theta_dot;\n  state_der[S_INDEX(THETA_DOT)] =\n      (-force * cos_theta - m_p * l_p * SQ(theta_dot) * cos_theta * sin_theta - (m_c + m_p) * gravity_ * sin_theta) /\n      (l_p * (m_c + m_p * SQ(sin_theta)));\n}\n\nDynamics<CartpoleDynamics, CartpoleDynamicsParams>::state_array\nCartpoleDynamics::stateFromMap(const std::map<std::string, float>& map)\n{\n  state_array s;\n  s(S_INDEX(POS_X)) = map.at(\"POS_X\");\n  s(S_INDEX(VEL_X)) = map.at(\"VEL_X\");\n  s(S_INDEX(THETA)) = map.at(\"THETA\");\n  s(S_INDEX(THETA_DOT)) = map.at(\"THETA_DOT\");\n  return s;\n}\n"
  },
  {
    "path": "include/mppi/dynamics/cartpole/cartpole_dynamics.cuh",
    "content": "#ifndef CARTPOLE_CUH_\n#define CARTPOLE_CUH_\n\n#include <mppi/dynamics/dynamics.cuh>\n\nstruct CartpoleDynamicsParams : public DynamicsParams\n{\n  enum class StateIndex : int\n  {\n    POS_X = 0,\n    VEL_X,\n    THETA,\n    THETA_DOT,\n    NUM_STATES\n  };\n  enum class ControlIndex : int\n  {\n    FORCE = 0,\n    NUM_CONTROLS\n  };\n  enum class OutputIndex : int\n  {\n    POS_X = 0,\n    VEL_X,\n    THETA,\n    THETA_DOT,\n    NUM_OUTPUTS\n  };\n  float cart_mass = 1.0f;\n  float pole_mass = 1.0f;\n  float pole_length = 1.0f;\n\n  CartpoleDynamicsParams() = default;\n  CartpoleDynamicsParams(float cart_mass, float pole_mass, float pole_length)\n    : cart_mass(cart_mass), pole_mass(pole_mass), pole_length(pole_length){};\n};\nusing namespace MPPI_internal;\n\nclass CartpoleDynamics : public Dynamics<CartpoleDynamics, CartpoleDynamicsParams>\n{\npublic:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  using PARENT_CLASS = Dynamics<CartpoleDynamics, CartpoleDynamicsParams>;\n  CartpoleDynamics(float cart_mass = 1.0f, float pole_mass = 1.0f, float pole_length = 1.0f, cudaStream_t stream = 0);\n\n  std::string getDynamicsModelName() const override\n  {\n    return \"Cartpole Model\";\n  }\n\n  /**\n   * runs dynamics using state and control and sets it to state\n   * derivative. 
Everything is Eigen Matrices, not Eigen Vectors!\n   *\n   * @param state     input of current state, passed by reference\n   * @param control   input of currrent control, passed by reference\n   * @param state_der output of new state derivative, passed by reference\n   */\n  void computeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                       Eigen::Ref<state_array> state_der);\n\n  /**\n   * compute the Jacobians with respect to state and control\n   *\n   * @param state   input of current state, passed by reference\n   * @param control input of currrent control, passed by reference\n\n   */\n  bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B);\n\n  __host__ __device__ float getCartMass()\n  {\n    return this->params_.cart_mass;\n  };\n  __host__ __device__ float getPoleMass()\n  {\n    return this->params_.pole_mass;\n  };\n  __host__ __device__ float getPoleLength()\n  {\n    return this->params_.pole_length;\n  };\n  __host__ __device__ float getGravity()\n  {\n    return gravity_;\n  }\n\n  void printState(const Eigen::Ref<const state_array>& state);\n  void printState(float* state);\n  void printParams();\n\n  // void computeKinematics(const Eigen::Ref<const state_array>&,\n  //                        Eigen::Ref<state_array>& state_der) {};\n\n  __device__ void computeDynamics(float* state, float* control, float* state_der, float* theta = nullptr);\n\n  state_array stateFromMap(const std::map<std::string, float>& map) override;\n\nprotected:\n  const float gravity_ = 9.81;\n};\n\n#if __CUDACC__\n#include \"cartpole_dynamics.cu\"\n#endif\n\n#endif  // CARTPOLE_CUH_\n"
  },
  {
    "path": "include/mppi/dynamics/double_integrator/di_dynamics.cu",
    "content": "#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n\nDoubleIntegratorDynamics::DoubleIntegratorDynamics(float system_noise, cudaStream_t stream)\n  : Dynamics<DoubleIntegratorDynamics, DoubleIntegratorParams>(stream)\n{\n  this->params_ = DoubleIntegratorParams(system_noise);\n\n  // Seed the RNG and initialize the system noise distribution\n  std::random_device rd;\n  gen.seed(rd());  // Seed the RNG with a random number\n  setStateVariance(system_noise);\n}\n\nvoid DoubleIntegratorDynamics::computeDynamics(const Eigen::Ref<const state_array>& state,\n                                               const Eigen::Ref<const control_array>& control,\n                                               Eigen::Ref<state_array> state_der)\n{\n  state_der(0) = state(2);    // xdot;\n  state_der(1) = state(3);    // ydot;\n  state_der(2) = control(0);  // x_force;\n  state_der(3) = control(1);  // y_force\n}\n\nbool DoubleIntegratorDynamics::computeGrad(const Eigen::Ref<const state_array>& state,\n                                           const Eigen::Ref<const control_array>& control, Eigen::Ref<dfdx> A,\n                                           Eigen::Ref<dfdu> B)\n{\n  A(0, 2) = 1;\n  A(1, 3) = 1;\n\n  B(2, 0) = 1;\n  B(3, 1) = 1;\n  return true;\n}\n\nvoid DoubleIntegratorDynamics::printState(float* state)\n{\n  printf(\"X position: %f; Y position: %f; X velocity: %f; Y velocity: %f \\n\", state[0], state[1], state[2], state[3]);\n}\n\nvoid DoubleIntegratorDynamics::printState(const float* state)\n{\n  printf(\"X position: %f; Y position: %f; X velocity: %f; Y velocity: %f \\n\", state[0], state[1], state[2], state[3]);\n}\n\n__device__ void DoubleIntegratorDynamics::computeDynamics(float* state, float* control, float* state_der,\n                                                          float* theta_s)\n{\n  state_der[0] = state[2];    // xdot;\n  state_der[1] = state[3];    // ydot;\n  state_der[2] = control[0];  // x_force;\n  state_der[3] = 
control[1];  // y_force\n}\n\nvoid DoubleIntegratorDynamics::setStateVariance(float system_variance)\n{\n  normal_distribution = std::normal_distribution<float>(0, sqrtf(system_variance));\n}\n\nvoid DoubleIntegratorDynamics::computeStateDisturbance(float dt, Eigen::Ref<state_array> state)\n{\n  // Generate system noise\n  state_array system_noise;\n  system_noise << 0.0, 0.0, normal_distribution(gen), normal_distribution(gen);\n  state += system_noise * dt;\n}\n\nDoubleIntegratorDynamics::dfdu DoubleIntegratorDynamics::B(const Eigen::Ref<const state_array>& state)\n{\n  dfdu B = dfdu::Zero();\n  B(2, 0) = 1;\n  B(3, 1) = 1;\n  return B;\n}\n\nDynamics<DoubleIntegratorDynamics, DoubleIntegratorParams>::state_array\nDoubleIntegratorDynamics::stateFromMap(const std::map<std::string, float>& map)\n{\n  state_array s;\n  s(0) = map.at(\"POS_X\");\n  s(1) = map.at(\"POS_Y\");\n  s(2) = map.at(\"VEL_X\");\n  s(3) = map.at(\"VEL_Y\");\n  return s;\n}\n"
  },
  {
    "path": "include/mppi/dynamics/double_integrator/di_dynamics.cuh",
    "content": "#pragma once\n\n#ifndef DOUBLE_INTEGRATOR_CUH_\n#define DOUBLE_INTEGRATOR_CUH_\n\n#include <mppi/dynamics/dynamics.cuh>\n#include <random>\n\nstruct DoubleIntegratorParams : public DynamicsParams\n{\n  enum class StateIndex : int\n  {\n    POS_X = 0,\n    POS_Y,\n    VEL_X,\n    VEL_Y,\n    NUM_STATES\n  };\n  enum class ControlIndex : int\n  {\n    ACCEL_X = 0,\n    ACCEL_Y,\n    NUM_CONTROLS\n  };\n  enum class OutputIndex : int\n  {\n    POS_X = 0,\n    POS_Y,\n    VEL_X,\n    VEL_Y,\n    NUM_OUTPUTS\n  };\n  float system_noise = 1;\n  DoubleIntegratorParams(float noise) : system_noise(noise){};\n  DoubleIntegratorParams() = default;\n  ~DoubleIntegratorParams() = default;\n};\n\nusing namespace MPPI_internal;\n\nclass DoubleIntegratorDynamics : public Dynamics<DoubleIntegratorDynamics, DoubleIntegratorParams>\n{\npublic:\n  //  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  DoubleIntegratorDynamics(float system_noise = 1, cudaStream_t stream = nullptr);\n\n  std::string getDynamicsModelName() const override\n  {\n    return \"2D Double Integrator Model\";\n  }\n\n  void computeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                       Eigen::Ref<state_array> state_der);\n\n  bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B);\n\n  void computeStateDisturbance(float dt, Eigen::Ref<state_array> state);\n\n  void setStateVariance(float system_variance = 1.0);\n\n  void printState(float* state);\n  void printState(const float* state);\n\n  __device__ void computeDynamics(float* state, float* control, float* state_der, float* theta = nullptr);\n\n  dfdu B(const Eigen::Ref<const state_array>& state);\n\n  state_array stateFromMap(const std::map<std::string, float>& map);\n\nprivate:\n  // Random number generator for system noise\n  std::mt19937 gen;  // Standard 
mersenne_twister_engine which will be seeded\n  std::normal_distribution<float> normal_distribution;\n};\n\n#if __CUDACC__\n#include \"di_dynamics.cu\"\n#endif\n\n#endif  //! DOUBLE_INTEGRATOR_CUH_\n"
  },
  {
    "path": "include/mppi/dynamics/dubins/dubins.cu",
    "content": "#include <mppi/dynamics/dubins/dubins.cuh>\n\nDubinsDynamics::DubinsDynamics(cudaStream_t stream) : Dynamics<DubinsDynamics, DubinsParams>(stream)\n{\n  this->params_ = DubinsParams();\n}\n\nvoid DubinsDynamics::computeDynamics(const Eigen::Ref<const state_array>& state,\n                                     const Eigen::Ref<const control_array>& control, Eigen::Ref<state_array> state_der)\n{\n  state_der(S_INDEX(POS_X)) = control(C_INDEX(VEL)) * cos(state(S_INDEX(YAW)));\n  state_der(S_INDEX(POS_Y)) = control(C_INDEX(VEL)) * sin(state(S_INDEX(YAW)));\n  state_der(S_INDEX(YAW)) = control(C_INDEX(YAW_RATE));\n}\n\nbool DubinsDynamics::computeGrad(const Eigen::Ref<const state_array>& state,\n                                 const Eigen::Ref<const control_array>& control, Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B)\n{\n  A(0, 2) = -control(C_INDEX(VEL)) * sin(state(S_INDEX(YAW)));\n  A(1, 2) = control(C_INDEX(VEL)) * cos(state(S_INDEX(YAW)));\n\n  B(0, 0) = cos(state(S_INDEX(YAW)));\n  B(1, 0) = sin(state(S_INDEX(YAW)));\n  B(2, 1) = 1;\n  return true;\n}\n\nvoid DubinsDynamics::updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                                 Eigen::Ref<state_array> state_der, const float dt)\n{\n  next_state = state + state_der * dt;\n  next_state(S_INDEX(YAW)) = angle_utils::normalizeAngle(next_state(S_INDEX(YAW)));\n}\n\nDubinsDynamics::state_array DubinsDynamics::interpolateState(const Eigen::Ref<state_array> state_1,\n                                                             const Eigen::Ref<state_array> state_2, const float alpha)\n{\n  state_array result = (1 - alpha) * state_1 + alpha * state_2;\n  result(2) = angle_utils::interpolateEulerAngleLinear(state_1(2), state_2(2), alpha);\n  return result;\n}\n\n__device__ void DubinsDynamics::updateState(float* state, float* next_state, float* state_der, const float dt)\n{\n  int i;\n  int tdy = threadIdx.y;\n  // Add the state derivative 
time dt to the current state.\n  // printf(\"updateState thread %d, %d = %f, %f\\n\", threadIdx.x, threadIdx.y, state[S_INDEX(POS_X)],\n  // state_der[S_INDEX(POS_X)]);\n  for (i = tdy; i < STATE_DIM; i += blockDim.y)\n  {\n    next_state[i] = state[i] + state_der[i] * dt;\n    if (i == S_INDEX(YAW))\n    {\n      next_state[i] = angle_utils::normalizeAngle(next_state[i]);\n    }\n  }\n}\n\n__device__ void DubinsDynamics::computeDynamics(float* state, float* control, float* state_der, float* theta_s)\n{\n  state_der[S_INDEX(POS_X)] = control[C_INDEX(VEL)] * cos(state[S_INDEX(YAW)]);\n  state_der[S_INDEX(POS_Y)] = control[C_INDEX(VEL)] * sin(state[S_INDEX(YAW)]);\n  state_der[S_INDEX(YAW)] = control[C_INDEX(YAW_RATE)];\n}\n\nDynamics<DubinsDynamics, DubinsParams>::state_array\nDubinsDynamics::stateFromMap(const std::map<std::string, float>& map)\n{\n  state_array s;\n  s(S_INDEX(POS_X)) = map.at(\"POS_X\");\n  s(S_INDEX(POS_Y)) = map.at(\"POS_Y\");\n  s(S_INDEX(YAW)) = map.at(\"YAW\");\n  return s;\n}\n"
  },
  {
    "path": "include/mppi/dynamics/dubins/dubins.cuh",
    "content": "#pragma once\n\n#ifndef DUBINS_CUH_\n#define DUBINS_CUH_\n\n#include <mppi/dynamics/dynamics.cuh>\n#include <mppi/utils/angle_utils.cuh>\n#include <random>\n\nstruct DubinsParams : public DynamicsParams\n{\n  enum class StateIndex : int\n  {\n    POS_X = 0,\n    POS_Y,\n    YAW,\n    NUM_STATES\n  };\n\n  enum class ControlIndex : int\n  {\n    VEL = 0,\n    YAW_RATE,\n    NUM_CONTROLS\n  };\n\n  enum class OutputIndex : int\n  {\n    POS_X = 0,\n    POS_Y,\n    YAW,\n    NUM_OUTPUTS\n  };\n  DubinsParams() = default;\n  ~DubinsParams() = default;\n};\n\nusing namespace MPPI_internal;\n/**\n * state: x, y, theta\n * control: forward velocity, angular velocity\n */\nclass DubinsDynamics : public Dynamics<DubinsDynamics, DubinsParams>\n{\npublic:\n  DubinsDynamics(cudaStream_t stream = nullptr);\n  using PARENT_CLASS = Dynamics<DubinsDynamics, DubinsParams>;\n  using PARENT_CLASS::updateState;  // needed as overloading updateState here hides all parent versions of updateState\n\n  std::string getDynamicsModelName() const override\n  {\n    return \"Dubins Model\";\n  }\n\n  void computeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                       Eigen::Ref<state_array> state_der);\n\n  void updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                   Eigen::Ref<state_array> state_der, const float dt);\n\n  __device__ void updateState(float* state, float* next_state, float* state_der, const float dt);\n\n  state_array interpolateState(const Eigen::Ref<state_array> state_1, const Eigen::Ref<state_array> state_2,\n                               const float alpha);\n\n  bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B);\n\n  __device__ void computeDynamics(float* state, float* control, float* state_der, float* theta = 
nullptr);\n\n  state_array stateFromMap(const std::map<std::string, float>& map) override;\n\nprivate:\n};\n\n#if __CUDACC__\n#include \"dubins.cu\"\n#endif\n\n#endif  //! DUBINS_CUH_\n"
  },
  {
    "path": "include/mppi/dynamics/dynamics.cu",
    "content": "#include <mppi/dynamics/dynamics.cuh>\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid Dynamics<CLASS_T, PARAMS_T>::paramsToDevice(bool synchronize)\n{\n  if (GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(&model_d_->params_, &params_, sizeof(PARAMS_T), cudaMemcpyHostToDevice, stream_));\n\n    HANDLE_ERROR(cudaMemcpyAsync(&model_d_->control_rngs_, &control_rngs_, CONTROL_DIM * sizeof(float2),\n                                 cudaMemcpyHostToDevice, stream_));\n    if (synchronize)\n    {\n      HANDLE_ERROR(cudaStreamSynchronize(stream_));\n    }\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid Dynamics<CLASS_T, PARAMS_T>::setDefaultControlRanges()\n{\n  for (int i = 0; i < CONTROL_DIM; i++)\n  {\n    control_rngs_[i].x = -FLT_MAX;\n    control_rngs_[i].y = FLT_MAX;\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid Dynamics<CLASS_T, PARAMS_T>::setControlRanges(std::array<float2, CONTROL_DIM>& control_rngs, bool synchronize)\n{\n  for (int i = 0; i < CONTROL_DIM; i++)\n  {\n    control_rngs_[i].x = control_rngs[i].x;\n    control_rngs_[i].y = control_rngs[i].y;\n  }\n  if (GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(this->model_d_->control_rngs_, this->control_rngs_, CONTROL_DIM * sizeof(float2),\n                                 cudaMemcpyHostToDevice, stream_));\n    if (synchronize)\n    {\n      HANDLE_ERROR(cudaStreamSynchronize(stream_));\n    }\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid Dynamics<CLASS_T, PARAMS_T>::setControlDeadbands(std::array<float, CONTROL_DIM>& control_deadband,\n                                                      bool synchronize)\n{\n  for (int i = 0; i < CONTROL_DIM; i++)\n  {\n    control_deadband_[i] = control_deadband[i];\n  }\n  if (GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(this->model_d_->control_deadband_, this->control_deadband_,\n                                 CONTROL_DIM * sizeof(float), cudaMemcpyHostToDevice, stream_));\n    if 
(synchronize)\n    {\n      HANDLE_ERROR(cudaStreamSynchronize(stream_));\n    }\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid Dynamics<CLASS_T, PARAMS_T>::GPUSetup()\n{\n  CLASS_T* derived = static_cast<CLASS_T*>(this);\n  if (!GPUMemStatus_)\n  {\n    model_d_ = Managed::GPUSetup(derived);\n  }\n  else\n  {\n    this->logger_->debug(\"%s: GPU Memory already set\\n\", derived->getDynamicsModelName().c_str());\n  }\n  derived->paramsToDevice();\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid Dynamics<CLASS_T, PARAMS_T>::freeCudaMem()\n{\n  if (GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaFree(model_d_));\n    GPUMemStatus_ = false;\n    model_d_ = nullptr;\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ inline void Dynamics<CLASS_T, PARAMS_T>::computeStateDeriv(float* state, float* control, float* state_der,\n                                                                      float* theta_s)\n{\n  CLASS_T* derived = static_cast<CLASS_T*>(this);\n  // only propagate a single state, i.e. 
thread.y = 0\n  // find the change in x,y,theta based off of the rest of the state\n  if (threadIdx.y == 0)\n  {\n    derived->computeKinematics(state, state_der);\n  }\n  derived->computeDynamics(state, control, state_der, theta_s);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void Dynamics<CLASS_T, PARAMS_T>::enforceConstraints(float* state, float* control)\n{\n  // TODO should control_rngs_ be a constant memory parameter\n  int i, p_index, step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(p_index, step);\n  // parallelize setting the constraints with y dim\n  for (i = p_index; i < CONTROL_DIM; i += step)\n  {\n    if (fabsf(control[i]) < this->control_deadband_[i])\n    {\n      control[i] = this->zero_control_[i];\n    }\n    else\n    {\n      control[i] += this->control_deadband_[i] * -mppi::math::sign(control[i]);\n    }\n    control[i] = fminf(fmaxf(this->control_rngs_[i].x, control[i]), this->control_rngs_[i].y);\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void Dynamics<CLASS_T, PARAMS_T>::updateState(float* state, float* next_state, float* state_der,\n                                                         const float dt)\n{\n  int i, p_index, step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(p_index, step);\n  // Add the state derivative time dt to the current state.\n  for (i = p_index; i < STATE_DIM; i += step)\n  {\n    next_state[i] = state[i] + state_der[i] * dt;\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ inline void Dynamics<CLASS_T, PARAMS_T>::step(float* state, float* next_state, float* state_der,\n                                                         float* control, float* output, float* theta_s, const float t,\n                                                         const float dt)\n{\n  CLASS_T* derived = static_cast<CLASS_T*>(this);\n  derived->computeStateDeriv(state, control, state_der, theta_s);\n  __syncthreads();\n  
derived->updateState(state, next_state, state_der, dt);\n  __syncthreads();\n  derived->stateToOutput(next_state, output);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ inline void Dynamics<CLASS_T, PARAMS_T>::stateToOutput(const float* __restrict__ state,\n                                                                           float* __restrict__ output)\n{\n  // TODO this is a hack\n  int p_index, step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(p_index, step);\n  for (int i = p_index; i < OUTPUT_DIM && i < STATE_DIM; i += step)\n  {\n    output[i] = state[i];\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ inline void Dynamics<CLASS_T, PARAMS_T>::outputToState(const float* __restrict__ output,\n                                                                           float* __restrict__ state)\n{\n  // TODO this is a hack\n  int p_index, step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(p_index, step);\n  for (int i = p_index; i < OUTPUT_DIM && i < STATE_DIM; i += step)\n  {\n    state[i] = output[i];\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nDynamics<CLASS_T, PARAMS_T>::state_array Dynamics<CLASS_T, PARAMS_T>::getZeroState() const\n{\n  return state_array::Zero();\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ inline void Dynamics<CLASS_T, PARAMS_T>::getZeroState(float* state) const\n{\n  int p_index, step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(p_index, step);\n  for (int i = p_index; i < STATE_DIM; i += step)\n  {\n    state[i] = 0.0f;\n  }\n}\n"
  },
  {
    "path": "include/mppi/dynamics/dynamics.cuh",
    "content": "#pragma once\n\n#ifndef DYNAMICS_CUH_\n#define DYNAMICS_CUH_\n\n/*\nHeader file for dynamics\n*/\n\n#include <Eigen/Dense>\n#include <mppi/utils/managed.cuh>\n#include <mppi/utils/angle_utils.cuh>\n#include <mppi/utils/math_utils.h>\n\n#include <stdio.h>\n#include <math.h>\n\n#include <array>\n#include <cfloat>\n#include <type_traits>\n#include <vector>\n#include <map>\n#include <string>\n\n// helpful macros to use the enum setup\n#ifndef E_INDEX\n#define E_INDEX(ENUM, enum_val) static_cast<int>(ENUM::enum_val)\n#endif\n#ifndef S_INDEX\n#define S_IND_CLASS(CLASS, enum_val) E_INDEX(CLASS::StateIndex, enum_val)\n#define S_INDEX(enum_val) S_IND_CLASS(PARENT_CLASS::DYN_PARAMS_T, enum_val)\n#endif\n\n#ifndef C_INDEX\n#define C_IND_CLASS(CLASS, enum_val) E_INDEX(CLASS::ControlIndex, enum_val)\n#define C_INDEX(enum_val) C_IND_CLASS(PARENT_CLASS::DYN_PARAMS_T, enum_val)\n#endif\n\n#ifndef O_INDEX\n#define O_IND_CLASS(CLASS, enum_val) E_INDEX(CLASS::OutputIndex, enum_val)\n#define O_INDEX(enum_val) O_IND_CLASS(PARENT_CLASS::DYN_PARAMS_T, enum_val)\n#endif\n\nstruct DynamicsParams\n{\n  enum class StateIndex : int\n  {\n    POS_X = 0,\n    NUM_STATES\n  };\n  enum class ControlIndex : int\n  {\n    VEL_X = 0,\n    NUM_CONTROLS\n  };\n  enum class OutputIndex : int\n  {\n    POS_X = 0,\n    NUM_OUTPUTS\n  };\n};\n\ntemplate <typename T>\nusing paramsInheritsFrom = typename std::is_base_of<DynamicsParams, T>;\n\nnamespace MPPI_internal\n{\ntemplate <class CLASS_T, class PARAMS_T>\nclass Dynamics : public Managed\n{\n  static_assert(paramsInheritsFrom<PARAMS_T>::value, \"Dynamics PARAMS_T does not inherit from DynamicsParams\");\n\npublic:\n  //  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  static const int STATE_DIM = S_IND_CLASS(PARAMS_T, NUM_STATES);\n  static const int CONTROL_DIM = C_IND_CLASS(PARAMS_T, NUM_CONTROLS);\n  static const int OUTPUT_DIM = O_IND_CLASS(PARAMS_T, NUM_OUTPUTS);\n  typedef CLASS_T DYN_T;\n  typedef PARAMS_T DYN_PARAMS_T;\n\n  /**\n   * useful 
typedefs\n   */\n  typedef Eigen::Matrix<float, CONTROL_DIM, 1> control_array;                 // Control at a time t\n  typedef Eigen::Matrix<float, STATE_DIM, 1> state_array;                     // State at a time t\n  typedef Eigen::Matrix<float, OUTPUT_DIM, 1> output_array;                   // Output at a time t\n  typedef Eigen::Matrix<float, STATE_DIM, STATE_DIM> dfdx;                    // Jacobian wrt x\n  typedef Eigen::Matrix<float, STATE_DIM, CONTROL_DIM> dfdu;                  // Jacobian wrt u\n  typedef Eigen::Matrix<float, CONTROL_DIM, STATE_DIM> feedback_matrix;       // Feedback matrix\n  typedef Eigen::Matrix<float, STATE_DIM, STATE_DIM + CONTROL_DIM> Jacobian;  // Jacobian of x and u\n\n  typedef std::map<std::string, Eigen::VectorXf> buffer_trajectory;\n\n  // protected constructor prevent anyone from trying to construct a Dynamics\nprotected:\n  /**\n   * sets the default control ranges to -infinity and +infinity\n   */\n  Dynamics(cudaStream_t stream = 0) : Managed(stream)\n  {\n    setDefaultControlRanges();\n  }\n\n  /**\n   * sets the control ranges to the passed in value\n   * @param control_rngs\n   * @param stream\n   */\n  Dynamics(std::array<float2, CONTROL_DIM>& control_rngs, cudaStream_t stream = 0) : Managed(stream)\n  {\n    setControlRanges(control_rngs);\n  }\n\n  Dynamics(PARAMS_T& params, std::array<float2, CONTROL_DIM>& control_rngs, cudaStream_t stream = 0) : Managed(stream)\n  {\n    setParams(params);\n    setControlRanges(control_rngs);\n  }\n\n  Dynamics(PARAMS_T& params, cudaStream_t stream = 0) : Managed(stream)\n  {\n    setParams(params);\n    setDefaultControlRanges();\n  }\n\npublic:\n  // This variable defines what the zero control is\n  // For most systems, it should be zero but for things like a quadrotor,\n  // it should be the command to hover\n  control_array zero_control_ = control_array::Zero();\n\n  /**\n   * Destructor must be virtual so that children are properly\n   * destroyed when called from a 
Dynamics reference\n   */\n  virtual ~Dynamics()\n  {\n    freeCudaMem();\n  }\n\n  virtual std::string getDynamicsModelName() const\n  {\n    return \"Dynamics model name not set\";\n  }\n\n  /**\n   * Allocates all of the GPU memory\n   */\n  void GPUSetup();\n\n  void setZeroControl(const Eigen::Ref<const control_array>& zero_control)\n  {\n    zero_control_ = zero_control;\n  }\n  control_array getZeroControl() const\n  {\n    return zero_control_;\n  }\n\n  std::array<float2, CONTROL_DIM> getControlRanges()\n  {\n    std::array<float2, CONTROL_DIM> result;\n    for (int i = 0; i < CONTROL_DIM; i++)\n    {\n      result[i] = control_rngs_[i];\n    }\n    return result;\n  }\n  __host__ __device__ float2* getControlRangesRaw()\n  {\n    return control_rngs_;\n  }\n\n  void setControlRanges(std::array<float2, CONTROL_DIM>& control_rngs, bool synchronize = true);\n\n  void setControlDeadbands(std::array<float, CONTROL_DIM>& control_deadband, bool synchronize = true);\n\n  void setParams(const PARAMS_T& params)\n  {\n    params_ = params;\n    if (GPUMemStatus_)\n    {\n      CLASS_T& derived = static_cast<CLASS_T&>(*this);\n      derived.paramsToDevice();\n    }\n  }\n\n  __device__ __host__ PARAMS_T getParams()\n  {\n    return params_;\n  }\n\n  /*\n   *\n   */\n  void freeCudaMem();\n\n  /**\n   *\n   */\n  void printState(float* state);\n\n  /**\n   *\n   */\n  // TODO should not assume it is going to cout, pass in stream\n  void printParams();\n\n  /**\n   *\n   */\n  void paramsToDevice(bool synchronize = true);\n\n  /**\n   * loads the .npz at given path\n   * @param model_path\n   */\n  void loadParams(const std::string& model_path);\n\n  /**\n   * updates the internals of the dynamics model\n   * @param description\n   * @param data\n   */\n  // TODO generalize\n  void updateModel(std::vector<int> description, std::vector<float> data);\n\n  /**\n   * compute the Jacobians with respect to state and control\n   *\n   * @param state   input of current state, 
passed by reference\n   * @param control input of currrent control, passed by reference\n   * @param A       output Jacobian wrt state, passed by reference\n   * @param B       output Jacobian wrt control, passed by reference\n   */\n  bool computeGrad(const Eigen::Ref<const state_array>& state = state_array(),\n                   const Eigen::Ref<const control_array>& control = control_array(), Eigen::Ref<dfdx> A = dfdx(),\n                   Eigen::Ref<dfdu> B = dfdu())\n  {\n    return false;\n  }\n  /**\n   * enforces control constraints\n   * @param state\n   * @param control\n   */\n  void enforceConstraints(Eigen::Ref<state_array> state, Eigen::Ref<control_array> control)\n  {\n    for (int i = 0; i < CONTROL_DIM; i++)\n    {\n      if (fabsf(control[i]) < this->control_deadband_[i])\n      {\n        control[i] = this->zero_control_[i];\n      }\n      else\n      {\n        control[i] += this->control_deadband_[i] * -mppi::math::sign(control[i]);\n      }\n      control[i] = fminf(fmaxf(this->control_rngs_[i].x, control[i]), this->control_rngs_[i].y);\n    }\n  }\n\n  /**\n   * updates the current state using s_der\n   * @param s state\n   * @param s_der\n   */\n  DEPRECATED void updateState(Eigen::Ref<state_array> state, Eigen::Ref<state_array> state_der, const float dt)\n  {\n    CLASS_T* derived = static_cast<CLASS_T*>(this);\n    derived->updateState(state, state, state_der, dt);\n  }\n\n  void updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                   Eigen::Ref<state_array> state_der, const float dt)\n  {\n    next_state = state + state_der * dt;\n  }\n\n  void step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state, Eigen::Ref<state_array> state_der,\n            const Eigen::Ref<const control_array>& control, Eigen::Ref<output_array> output, const float t,\n            const float dt)\n  {\n    CLASS_T* derived = static_cast<CLASS_T*>(this);\n    derived->computeStateDeriv(state, 
control, state_der);\n    derived->updateState(state, next_state, state_der, dt);\n    derived->stateToOutput(next_state, output);\n  }\n\n  void stateToOutput(const Eigen::Ref<const state_array>& state, Eigen::Ref<output_array> output)\n  {\n    // TODO this is a hack\n    for (int i = 0; i < OUTPUT_DIM && i < STATE_DIM; i++)\n    {\n      output[i] = state[i];\n    }\n  }\n\n  __host__ __device__ void stateToOutput(const float* __restrict__ state, float* __restrict__ output);\n\n  __host__ __device__ void outputToState(const float* __restrict__ output, float* __restrict__ state);\n\n  state_array getZeroState() const;\n\n  __host__ __device__ void getZeroState(float* state) const;\n\n  /**\n   * does a linear interpolation of states\n   * @param state_1\n   * @param state_2\n   * @param alpha\n   * @return\n   */\n  state_array interpolateState(const Eigen::Ref<const state_array>& state_1,\n                               const Eigen::Ref<const state_array>& state_2, const float alpha)\n  {\n    return (1 - alpha) * state_1 + alpha * state_2;\n  }\n\n  /**\n   * computes a specific state error\n   * @param pred_state\n   * @param true_state\n   * @return\n   */\n  state_array computeStateError(const Eigen::Ref<const state_array>& pred_state,\n                                const Eigen::Ref<const state_array>& true_state)\n  {\n    return pred_state - true_state;\n  }\n\n  /**\n   * computes the section of the state derivative that comes form the dyanmics\n   * @param state\n   * @param control\n   * @param state_der\n   */\n  void computeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                       Eigen::Ref<state_array> state_der);\n\n  /**\n   * computes the parts of the state that are based off of kinematics\n   * @param s state\n   * @param s_der\n   */\n  void computeKinematics(const Eigen::Ref<const state_array>& state, Eigen::Ref<state_array> s_der){};\n\n  /**\n   * computes the full state 
derivative by calling computeKinematics then computeDynamics\n   * @param state\n   * @param control\n   * @param state_der\n   */\n  void computeStateDeriv(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                         Eigen::Ref<state_array> state_der)\n  {\n    CLASS_T* derived = static_cast<CLASS_T*>(this);\n    derived->computeKinematics(state, state_der);\n    derived->computeDynamics(state, control, state_der);\n  }\n\n  /**\n   * computes the section of the state derivative that comes form the dyanmics\n   * @param state\n   * @param control\n   * @param state_der\n   * @param theta_s shared memory that can be used when computation is computed across the same block\n   */\n  __device__ void computeDynamics(float* state, float* control, float* state_der, float* theta_s = nullptr);\n\n  /**\n   * computes the parts of the state that are based off of kinematics\n   * parallelized on X only\n   * @param state\n   * @param state_der\n   */\n  __device__ void computeKinematics(float* state, float* state_der){};\n\n  /**\n   * @param state\n   * @param control\n   * @param state_der\n   * @param theta_s shared memory that can be used when computation is computed across the same block\n   */\n  __device__ inline void computeStateDeriv(float* state, float* control, float* state_der, float* theta_s);\n\n  __device__ inline void step(float* state, float* next_state, float* state_der, float* control, float* output,\n                              float* theta_s, const float t, const float dt);\n\n  /**\n   * applies the state derivative\n   * @param state\n   * @param state_der\n   * @param dt\n   */\n  DEPRECATED __device__ void updateState(float* state, float* state_der, const float dt)\n  {\n    CLASS_T* derived = static_cast<CLASS_T*>(this);\n    derived->updateState(state, state, state_der, dt);\n  }\n\n  __device__ void updateState(float* state, float* next_state, float* state_der, const float dt);\n\n  /**\n  
 * enforces control constraints\n   */\n  __device__ void enforceConstraints(float* state, float* control);\n\n  /**\n   * Method to allow setup of dynamics on the GPU. This is needed for\n   * initializing the memory of an LSTM for example\n   */\n  void initializeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                          Eigen::Ref<output_array> output, float t_0, float dt)\n  {\n    for (int i = 0; i < OUTPUT_DIM && i < STATE_DIM; i++)\n    {\n      output[i] = state[i];\n    }\n  }\n\n  /**\n   * Method to allow setup of dynamics on the GPU. This is needed for\n   * initializing the memory of an LSTM for example\n   */\n  __device__ void initializeDynamics(float* state, float* control, float* output, float* theta_s, float t_0, float dt)\n  {\n    for (int i = 0; i < OUTPUT_DIM && i < STATE_DIM; i++)\n    {\n      output[i] = state[i];\n    }\n  }\n\n  /**\n   * Method to compute an emergency stopping control\n   */\n  virtual void getStoppingControl(const Eigen::Ref<const state_array>& state, Eigen::Ref<control_array> u)\n  {\n    u.setZero();\n  }\n\n  /**\n   * Method to enforce a leash on the initial state, which depends on type of dynamics.\n   */\n  virtual void enforceLeash(const Eigen::Ref<const state_array>& state_true,\n                            const Eigen::Ref<const state_array>& state_nominal,\n                            const Eigen::Ref<const state_array>& leash_values, Eigen::Ref<state_array> state_output)\n  {\n    for (int i = 0; i < DYN_T::STATE_DIM; i++)\n    {\n      float diff = fabsf(state_nominal[i] - state_true[i]);\n\n      if (leash_values[i] < diff)\n      {\n        float leash_dir = fminf(fmaxf(state_nominal[i] - state_true[i], -leash_values[i]), leash_values[i]);\n        state_output[i] = state_true[i] + leash_dir;\n      }\n      else\n      {\n        state_output[i] = state_nominal[i];\n      }\n    }\n  }\n\n  virtual bool checkRequiresBuffer()\n  {\n    
return requires_buffer_;\n  }\n\n  bool updateFromBuffer(const buffer_trajectory& buffer)\n  {\n    return true;\n  }\n\n  bool checkIfKeysInBuffer(const buffer_trajectory& buffer, std::vector<std::string>& needed_keys)\n  {\n    bool missing_key = false;\n    for (const auto& key : needed_keys)\n    {\n      if (buffer.find(key) == buffer.end())\n      {  // Print out all missing keys\n        this->logger_->warning(\"Could not find key %s in init buffer for %s\\n\", key.c_str(),\n                               this->getDynamicsModelName().c_str());\n        missing_key = true;\n      }\n    }\n    return missing_key;\n  }\n\n  bool checkIfKeysInMap(const std::map<std::string, float>& map, std::vector<std::string>& needed_keys)\n  {\n    bool missing_key = false;\n    for (const auto& key : needed_keys)\n    {\n      if (map.find(key) == map.end())\n      {  // Print out all missing keys\n        this->logger_->warning(\"Could not find key %s in state map for %s\\n\", key.c_str(),\n                               this->getDynamicsModelName().c_str());\n        missing_key = true;\n      }\n    }\n    return missing_key;\n  }\n\n  virtual state_array stateFromMap(const std::map<std::string, float>& map) = 0;\n\n  // control ranges [.x, .y]\n  float2 control_rngs_[CONTROL_DIM];\n  float control_deadband_[CONTROL_DIM] = { 0.0f };\n\n  // device pointer, null on the device\n  CLASS_T* model_d_ = nullptr;\n\nprotected:\n  // generic parameter structure\n  PARAMS_T params_;\n\n  bool requires_buffer_ = false;\n\nprivate:\n  void setDefaultControlRanges();\n};\n\n#ifdef __CUDACC__\n#include \"dynamics.cu\"\n#endif\n\ntemplate <class CLASS_T, class PARAMS_T>\nconst int Dynamics<CLASS_T, PARAMS_T>::STATE_DIM;\n\ntemplate <class CLASS_T, class PARAMS_T>\nconst int Dynamics<CLASS_T, PARAMS_T>::CONTROL_DIM;\n\ntemplate <class CLASS_T, class PARAMS_T>\nconst int Dynamics<CLASS_T, PARAMS_T>::OUTPUT_DIM;\n}  // namespace MPPI_internal\n#endif  // DYNAMICS_CUH_\n"
  },
  {
    "path": "include/mppi/dynamics/linear/linear.cu",
    "content": "/**\n * @file linear.cu\n * @brief Linear Dynamics\n * @author Bogdan Vlahov\n * @version\n * @date 2024-08-16\n */\n#include <mppi/dynamics/racer_dubins/racer_dubins.cuh>\n#include <string>\n#include <mppi/utils/math_utils.h>\n\nnamespace mm = mppi::matrix_multiplication;\nnamespace mp1 = mppi::p1;\n\n#define LINEAR_TEMPLATE template <class CLASS_T, class PARAMS_T>\n#define LINEAR_DYNAMICS LinearDynamicsImpl<CLASS_T, PARAMS_T>\n\nLINEAR_TEMPLATE LINEAR_DYNAMICS::LinearDynamicsImpl(cudaStream_t stream) : PARENT_CLASS(stream)\n{\n  this->SHARED_MEM_REQUEST_GRD_BYTES = sizeof(PARAMS_T);\n}\n\nLINEAR_TEMPLATE LINEAR_DYNAMICS::LinearDynamicsImpl(PARAMS_T& params, cudaStream_t stream)\n  : PARENT_CLASS(params, stream)\n{\n  this->SHARED_MEM_REQUEST_GRD_BYTES = sizeof(PARAMS_T);\n}\n\nLINEAR_TEMPLATE\nbool LINEAR_DYNAMICS::computeGrad(const Eigen::Ref<const state_array>& state,\n                                  const Eigen::Ref<const control_array>& control, Eigen::Ref<dfdx> A,\n                                  Eigen::Ref<dfdu> B)\n{\n  A = this->params_.getA();\n  B = this->params_.getB();\n  return true;\n}\n\nLINEAR_TEMPLATE\nLINEAR_DYNAMICS::state_array LINEAR_DYNAMICS::stateFromMap(const std::map<std::string, float>& map)\n{\n  state_array x = this->getZeroState();\n  for (int i = 0; i < this->STATE_DIM; i++)\n  {\n    std::string state_name = \"x_\" + std::to_string(i);\n    x(i, 0) = map.at(state_name);\n  }\n  return x;\n}\n\nLINEAR_TEMPLATE\nvoid LINEAR_DYNAMICS::step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state,\n                           Eigen::Ref<state_array> state_der, const Eigen::Ref<const control_array>& control,\n                           Eigen::Ref<output_array> output, const float t, const float dt)\n{\n  state_der = this->params_.getA() * state + this->params_.getB() * control;\n  next_state = state + state_der * dt;\n  this->stateToOutput(next_state, output);\n}\n\nLINEAR_TEMPLATE\n__device__ inline void 
LINEAR_DYNAMICS::step(float* state, float* next_state, float* state_der, float* control,\n                                             float* output, float* theta_s, const float t, const float dt)\n{\n  PARAMS_T* params_p = &(this->params_);\n  if (this->getGrdSharedSizeBytes() != 0)\n  {\n    params_p = (PARAMS_T*)theta_s;\n  }\n  float* A = params_p->A;\n  float* B = params_p->B;\n\n  const mp1::Parallel1Dir P_DIR = mp1::Parallel1Dir::THREAD_Y;\n  mm::gemm1<this->STATE_DIM, this->STATE_DIM, 1, P_DIR>(A, state, state_der);\n  mm::gemm1<this->STATE_DIM, this->CONTROL_DIM, 1, P_DIR>(B, control, state_der, 1.0f, 1.0f);\n  // __syncthreads();\n  int index, step;\n  mp1::getParallel1DIndex<P_DIR>(index, step);\n  for (int i = index; i < this->STATE_DIM; i += step)\n  {\n    next_state[i] = state[i] + state_der[i] * dt;\n    output[i] = next_state[i];\n  }\n}\n\nLINEAR_TEMPLATE\n__device__ void LINEAR_DYNAMICS::initializeDynamics(float* state, float* control, float* output, float* theta_s,\n                                                    float t_0, float dt)\n{\n  PARENT_CLASS::initializeDynamics(state, control, output, theta_s, t_0, dt);\n  if (this->getGrdSharedSizeBytes() != 0)\n  {\n    PARAMS_T* shared = (PARAMS_T*)theta_s;\n    *shared = this->params_;\n  }\n}\n\n#undef LINEAR_TEMPLATE\n#undef LINEAR_DYNAMICS\n"
  },
  {
    "path": "include/mppi/dynamics/linear/linear.cuh",
    "content": "/**\n * @file linear.cuh\n * @brief Linear Dynamics\n * @author Bogdan Vlahov\n * @version\n * @date 2024-08-16\n */\n#pragma once\n\n#include <mppi/dynamics/dynamics.cuh>\n\ntemplate <int STATE_DIM = 1, int CONTROL_DIM = 1>\nstruct LinearDynamicsParams : public DynamicsParams\n{\npublic:\n  using state_matrix = Eigen::Matrix<float, STATE_DIM, STATE_DIM>;\n  using control_matrix = Eigen::Matrix<float, STATE_DIM, CONTROL_DIM>;\n  enum class StateIndex : int\n  {\n    NUM_STATES = STATE_DIM,\n  };\n\n  enum class ControlIndex : int\n  {\n    NUM_CONTROLS = CONTROL_DIM,\n  };\n  enum class OutputIndex : int\n  {\n    NUM_OUTPUTS = STATE_DIM,\n  };\n  float A[STATE_DIM * STATE_DIM] = { 0.0f };\n  float B[STATE_DIM * CONTROL_DIM] = { 0.0f };\n\n  LinearDynamicsParams() = default;\n  ~LinearDynamicsParams() = default;\n\n  void setA(const Eigen::Ref<const state_matrix>& A_eigen)\n  {\n    memcpy(A, A_eigen.data(), sizeof(float) * STATE_DIM * STATE_DIM);\n  }\n\n  void setB(const Eigen::Ref<const control_matrix>& B_eigen)\n  {\n    memcpy(B, B_eigen.data(), sizeof(float) * STATE_DIM * CONTROL_DIM);\n  }\n\n  float* getA() const\n  {\n    return A;\n  }\n  float* getB() const\n  {\n    return B;\n  }\n\n  Eigen::Map<state_matrix> getA()\n  {\n    Eigen::Map<state_matrix> A_eigen(A);\n    return A_eigen;\n  }\n\n  Eigen::Map<control_matrix> getB()\n  {\n    Eigen::Map<control_matrix> B_eigen(B);\n    return B_eigen;\n  }\n};\n\nusing namespace MPPI_internal;\n\ntemplate <class CLASS_T, class PARAMS_T = LinearDynamicsParams<1, 1>>\nclass LinearDynamicsImpl : public Dynamics<CLASS_T, PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = Dynamics<CLASS_T, PARAMS_T>;\n  using state_array = typename PARENT_CLASS::state_array;\n  using control_array = typename PARENT_CLASS::control_array;\n  using output_array = typename PARENT_CLASS::output_array;\n  using dfdx = typename PARENT_CLASS::dfdx;\n  using dfdu = typename PARENT_CLASS::dfdu;\n  using 
PARENT_CLASS::initializeDynamics;\n\n  LinearDynamicsImpl(cudaStream_t stream = nullptr);\n  LinearDynamicsImpl(PARAMS_T& params, cudaStream_t stream = nullptr);\n\n  std::string getDynamicsModelName() const override\n  {\n    return \"Linear Dynamics\";\n  }\n\n  bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B);\n\n  state_array stateFromMap(const std::map<std::string, float>& map);\n\n  void step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state, Eigen::Ref<state_array> state_der,\n            const Eigen::Ref<const control_array>& control, Eigen::Ref<output_array> output, const float t,\n            const float dt);\n\n  void setA(const Eigen::Ref<const dfdx>& A_eigen, bool synchronize = false)\n  {\n    this->params_.setA(A_eigen);\n    this->paramsToDevice(synchronize);\n  }\n\n  void setB(const Eigen::Ref<const dfdu>& B_eigen, bool synchronize = false)\n  {\n    this->params_.setB(B_eigen);\n    this->paramsToDevice(synchronize);\n  }\n\n  __device__ inline void step(float* state, float* next_state, float* state_der, float* control, float* output,\n                              float* theta_s, const float t, const float dt);\n\n  __device__ void initializeDynamics(float* state, float* control, float* output, float* theta_s, float t_0, float dt);\n};\n\ntemplate <int STATE_DIM = 1, int CONTROL_DIM = 1>\nclass LinearDynamics\n  : public LinearDynamicsImpl<LinearDynamics<STATE_DIM, CONTROL_DIM>, LinearDynamicsParams<STATE_DIM, CONTROL_DIM>>\n{\npublic:\n  using PARENT_CLASS =\n      LinearDynamicsImpl<LinearDynamics<STATE_DIM, CONTROL_DIM>, LinearDynamicsParams<STATE_DIM, CONTROL_DIM>>;\n  using DYN_PARAMS_T = typename PARENT_CLASS::DYN_PARAMS_T;\n\n  LinearDynamics(cudaStream_t stream = nullptr) : PARENT_CLASS(stream)\n  {\n  }\n  LinearDynamics(DYN_PARAMS_T& params, cudaStream_t stream = nullptr) : PARENT_CLASS(params, 
stream)\n  {\n  }\n};\n\n#ifdef __CUDACC__\n#include \"linear.cu\"\n#endif\n"
  },
  {
    "path": "include/mppi/dynamics/quadrotor/quadrotor_dynamics.cu",
    "content": "#include <mppi/dynamics/quadrotor/quadrotor_dynamics.cuh>\n#include <mppi/utils/math_utils.h>\n\nQuadrotorDynamics::QuadrotorDynamics(std::array<float2, CONTROL_DIM> control_rngs, cudaStream_t stream)\n  : PARENT_CLASS(control_rngs, stream)\n{\n  this->params_ = QuadrotorDynamicsParams();\n  zero_control_[3] = mppi::math::GRAVITY;\n}\n\nQuadrotorDynamics::QuadrotorDynamics(cudaStream_t stream) : PARENT_CLASS(stream)\n{\n  this->params_ = QuadrotorDynamicsParams();\n  float2 thrust_rng;\n  thrust_rng.x = 0;\n  thrust_rng.y = 36;  // TODO Figure out if this is a reasonable amount of thrust\n  this->control_rngs_[3] = thrust_rng;\n  this->zero_control_[3] = mppi::math::GRAVITY;\n}\n\nvoid QuadrotorDynamics::printState(float* state)\n{\n  int precision = 4;\n  int total_char = precision + 4;\n  printf(\"Pos     x: %*.*f, y: %*.*f, z: %*.*f\\n\", total_char, precision, state[0], total_char, precision, state[1],\n         total_char, precision, state[2]);\n  printf(\"Vel     x: %*.*f, y: %*.*f, z: %*.*f\\n\", total_char, precision, state[3], total_char, precision, state[4],\n         total_char, precision, state[5]);\n  printf(\"Quat    w: %*.*f, x: %*.*f, y: %*.*f, z: %*.*f\\n\", total_char, precision, state[6], total_char, precision,\n         state[7], total_char, precision, state[8], total_char, precision, state[9]);\n  printf(\"Ang Vel x: %*.*f, y: %*.*f, z: %*.*f\\n\", total_char, precision, state[10], total_char, precision, state[11],\n         total_char, precision, state[12]);\n}\n\nbool QuadrotorDynamics::computeGrad(const Eigen::Ref<const state_array>& state,\n                                    const Eigen::Ref<const control_array>& control, Eigen::Ref<dfdx> A,\n                                    Eigen::Ref<dfdu> B)\n{\n  Eigen::Quaternionf q(state[6], state[7], state[8], state[9]);\n  Eigen::Matrix3f dcm_lb;\n  // dcm_lb = q.toRotationMatrix();\n  mppi::math::Quat2DCM(q, dcm_lb);\n\n  // I can do the math for everything except quaternions\n  
A.setZero();\n  // x_d\n  A.block<3, 3>(0, 3).setIdentity();\n  // v_d TODO figure out derivative of v_d wrt q\n\n  // q_d TODO figure out derivative of q_d wrt q\n\n  // w_d\n  Eigen::Vector3f tau_inv;\n  tau_inv << 1 / this->params_.tau_roll, 1 / this->params_.tau_pitch, 1 / this->params_.tau_yaw;\n  A.block<3, 3>(10, 10) = -1 * tau_inv.asDiagonal();\n\n  B.setZero();\n  // x_d is empty\n  // v_d\n  B.block<3, 1>(3, 3) = dcm_lb.col(2) / this->params_.mass;\n  // q_d using omega2edot as reference\n  B.block<4, 3>(6, 0) << -0.5 * q.x(), -0.5 * q.y(), -0.5 * q.z(), 0.5 * q.w(), -0.5 * q.z(), 0.5 * q.y(), 0.5 * q.z(),\n      0.5 * q.w(), -0.5 * q.x(), -0.5 * q.y(), 0.5 * q.x(), 0.5 * q.w();\n\n  // w_d\n  B.block<3, 3>(10, 0) = tau_inv.asDiagonal();\n  return false;\n}\n\nvoid QuadrotorDynamics::computeDynamics(const Eigen::Ref<const state_array>& state,\n                                        const Eigen::Ref<const control_array>& control,\n                                        Eigen::Ref<state_array> state_der)\n{\n  // Fill in\n  state_der.block<3, 1>(0, 0);\n  Eigen::Vector3f x_d, v_d, angular_speed_d, u_pqr;\n  Eigen::Matrix<float, 3, 1> angular_speed, v;\n  Eigen::Quaternionf q_d;\n  Eigen::Matrix<float, 3, 1> tau_inv;\n  float u_thrust = control[C_INDEX(THRUST)];\n\n  // Assume quaterion is w, x, y, z in state array\n  Eigen::Quaternionf q(state[6], state[7], state[8], state[9]);\n  u_pqr << control[C_INDEX(ANG_RATE_X)], control[C_INDEX(ANG_RATE_Y)], control[C_INDEX(ANG_RATE_Z)];\n  v = state.block<3, 1>(3, 0);\n  angular_speed << state(10), state(11), state(12);\n  tau_inv << 1 / this->params_.tau_roll, 1 / this->params_.tau_pitch, 1 / this->params_.tau_yaw;\n\n  Eigen::Matrix3f dcm_lb = Eigen::Matrix3f::Identity();\n\n  // x_d = v\n  x_d = v;\n\n  // v_d = Lvb * [0 0 T]' + g\n  mppi::math::Quat2DCM(q, dcm_lb);\n  v_d = (u_thrust / this->params_.mass) * dcm_lb.col(2);\n  v_d(2) -= mppi::math::GRAVITY;\n\n  // q_d = H(q) w\n  
mppi::math::omega2edot(angular_speed(0), angular_speed(1), angular_speed(2), q, q_d);\n\n  // w_d = (u_pqr - w)/ tau\n  // Note we assume that a low level controller makes angular velocity tracking\n  // a first order system\n  angular_speed_d = tau_inv.cwiseProduct(u_pqr - angular_speed);\n\n  // Copy into state_deriv\n  state_der.block<3, 1>(0, 0) = x_d;\n  state_der.block<3, 1>(3, 0) = v_d;\n  state_der(6) = q_d.w();\n  state_der(7) = q_d.x();\n  state_der(8) = q_d.y();\n  state_der(9) = q_d.z();\n  state_der.block<3, 1>(10, 0) = angular_speed_d;\n}\n\nvoid QuadrotorDynamics::updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                                    Eigen::Ref<state_array> state_der, const float dt)\n{\n  PARENT_CLASS::updateState(state, next_state, state_der, dt);\n\n  // Renormalize quaternion\n  Eigen::Quaternionf q(next_state[6], next_state[7], next_state[8], next_state[9]);\n  next_state.block<4, 1>(6, 0) /= q.norm() * copysign(1.0, q.w());\n}\n\n__device__ void QuadrotorDynamics::computeDynamics(float* state, float* control, float* state_der, float* theta)\n{\n  //  Fill in\n  float* v = state + 3;\n  float* q = state + 6;\n  float* w = state + 10;\n\n  // Derivatives\n  float* x_d = state_der;\n  float* v_d = state_der + 3;\n  float* q_d = state_der + 6;\n  float* w_d = state_der + 10;\n\n  float* u_pqr = control;\n  float u_thrust = control[C_INDEX(THRUST)];\n\n  float dcm_lb[3][3];\n\n  int i;\n\n  // x_d = v\n  for (i = threadIdx.y; i < 3; i += blockDim.y)\n  {\n    x_d[i] = v[i];\n  }\n\n  // v_d = Lvb * [0 0 T]' + g\n  mppi::math::Quat2DCM(q, dcm_lb);\n  for (i = threadIdx.y; i < 3; i += blockDim.y)\n  {\n    v_d[i] = (u_thrust / this->params_.mass) * dcm_lb[i][2];\n  }\n  __syncthreads();\n  if (threadIdx.y == 0)\n  {\n    v_d[2] -= mppi::math::GRAVITY;\n  }\n\n  // q_d = H(q) w\n  mppi::math::omega2edot(w[0], w[1], w[2], q, q_d);\n\n  // w_d = (u - w) / tau\n  w_d[0] = (u_pqr[0] - w[0]) / 
this->params_.tau_roll;\n  w_d[1] = (u_pqr[1] - w[1]) / this->params_.tau_pitch;\n  w_d[2] = (u_pqr[2] - w[2]) / this->params_.tau_yaw;\n  __syncthreads();\n}\n\n__device__ void QuadrotorDynamics::updateState(float* state, float* next_state, float* state_der, float dt)\n{\n  PARENT_CLASS::updateState(state, next_state, state_der, dt);\n\n  int i = 0;\n  // renormalize quaternion\n  float* q = next_state + 6;\n  float q_norm = sqrtf(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]);\n  for (i = threadIdx.y; i < 4; i += blockDim.y)\n  {\n    q[i] /= q_norm * copysignf(1.0, q[0]);\n  }\n  // __syncthreads();\n}\n\nQuadrotorDynamics::state_array QuadrotorDynamics::stateFromMap(const std::map<std::string, float>& map)\n{\n  state_array s;\n  s(S_INDEX(POS_X)) = map.at(\"POS_X\");\n  s(S_INDEX(POS_Y)) = map.at(\"POS_Y\");\n  s(S_INDEX(POS_Z)) = map.at(\"POS_Z\");\n\n  s(S_INDEX(VEL_X)) = map.at(\"VEL_X\");\n  s(S_INDEX(VEL_Y)) = map.at(\"VEL_Y\");\n  s(S_INDEX(VEL_Z)) = map.at(\"VEL_Z\");\n\n  s(S_INDEX(QUAT_X)) = map.at(\"Q_X\");\n  s(S_INDEX(QUAT_Y)) = map.at(\"Q_Y\");\n  s(S_INDEX(QUAT_Z)) = map.at(\"Q_Z\");\n  s(S_INDEX(QUAT_W)) = map.at(\"Q_W\");\n\n  s(S_INDEX(ANG_VEL_X)) = map.at(\"OMEGA_X\");\n  s(S_INDEX(ANG_VEL_Y)) = map.at(\"OMEGA_Y\");\n  s(S_INDEX(ANG_VEL_Z)) = map.at(\"OMEGA_Z\");\n  return s;\n}\n\nQuadrotorDynamics::state_array QuadrotorDynamics::getZeroState() const\n{\n  state_array zero = state_array::Zero();\n  zero[S_INDEX(QUAT_W)] = 1.0f;\n  return zero;\n}\n\n__host__ __device__ void QuadrotorDynamics::getZeroState(float* state) const\n{\n  int p_index, step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(p_index, step);\n  for (int i = p_index; i < STATE_DIM; i += step)\n  {\n    if (i == S_INDEX(QUAT_W))\n    {\n      state[i] = 1.0f;\n    }\n    else\n    {\n      state[i] = 0.0f;\n    }\n  }\n}\n"
  },
  {
    "path": "include/mppi/dynamics/quadrotor/quadrotor_dynamics.cuh",
    "content": "/*\n * Created on Tue Jun 02 2020 by Bogdan Vlahov\n *\n */\n#ifndef QUADROTOR_DYNAMICS_CUH_\n#define QUADROTOR_DYNAMICS_CUH_\n\n#include <mppi/dynamics/dynamics.cuh>\n\nstruct QuadrotorDynamicsParams : public DynamicsParams\n{\n  enum class StateIndex : int\n  {\n    POS_X = 0,\n    POS_Y,\n    POS_Z,\n    VEL_X,\n    VEL_Y,\n    VEL_Z,\n    QUAT_W,\n    QUAT_X,\n    QUAT_Y,\n    QUAT_Z,\n    ANG_VEL_X,\n    ANG_VEL_Y,\n    ANG_VEL_Z,\n    NUM_STATES\n  };\n\n  enum class ControlIndex : int\n  {\n    ANG_RATE_X = 0,\n    ANG_RATE_Y,\n    ANG_RATE_Z,\n    THRUST,\n    NUM_CONTROLS\n  };\n\n  enum class OutputIndex : int\n  {\n    POS_X = 0,\n    POS_Y,\n    POS_Z,\n    VEL_X,\n    VEL_Y,\n    VEL_Z,\n    QUAT_W,\n    QUAT_X,\n    QUAT_Y,\n    QUAT_Z,\n    ANG_VEL_X,\n    ANG_VEL_Y,\n    ANG_VEL_Z,\n    NUM_OUTPUTS\n  };\n  float tau_roll = 0.25;\n  float tau_pitch = 0.25;\n  float tau_yaw = 0.25;\n  float mass = 1;  // kg\n  QuadrotorDynamicsParams(float mass_in) : mass(mass_in){};\n  QuadrotorDynamicsParams() = default;\n  ~QuadrotorDynamicsParams() = default;\n};\n\nusing namespace MPPI_internal;\n\nclass QuadrotorDynamics : public Dynamics<QuadrotorDynamics, QuadrotorDynamicsParams>\n{\n  /**\n   * State for this class is defined as follows:\n   *    x - position in 3D space (x, y, z) - meters\n   *    v - velocity in 3D space (v_x, v_y_ v_z) - meters/sec\n   *    q - quaternion (q_w, q_x, q_y, q_z)\n   *    w - angular velocities (roll_dot, pitch_dot, yaw_dot) - rad/sec\n   *\n   * Coordinate Frame is NWU\n   *\n   * Control:\n   *    roll_rate  - rad/sec\n   *    pitch_rate - rad/sec\n   *    yaw_rate   - rad/sec\n   *    thrust     - Newtons\n   */\npublic:\n  using PARENT_CLASS = Dynamics<QuadrotorDynamics, QuadrotorDynamicsParams>;\n\n  using state_array = typename PARENT_CLASS::state_array;\n  using control_array = typename PARENT_CLASS::control_array;\n  using dfdx = typename PARENT_CLASS::dfdx;\n  using dfdu = typename 
PARENT_CLASS::dfdu;\n\n  // Constructor\n  QuadrotorDynamics(cudaStream_t stream = 0);\n  QuadrotorDynamics(std::array<float2, CONTROL_DIM> control_rngs, cudaStream_t stream = 0);\n\n  using PARENT_CLASS::updateState;  // needed as overloading updateState here hides all parent versions of updateState\n\n  std::string getDynamicsModelName() const override\n  {\n    return \"Quadrotor Model\";\n  }\n\n  void computeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                       Eigen::Ref<state_array> state_der);\n\n  bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B);\n\n  void updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                   Eigen::Ref<state_array> state_der, const float dt);\n\n  void printState(float* state);\n\n  __device__ void computeDynamics(float* state, float* control, float* state_der, float* theta = nullptr);\n\n  __device__ void updateState(float* state, float* next_state, float* state_der, const float dt);\n\n  state_array stateFromMap(const std::map<std::string, float>& map) override;\n\n  __host__ __device__ void getZeroState(float* state) const;\n\n  state_array getZeroState() const;\n};\n\n#if __CUDACC__\n#include \"quadrotor_dynamics.cu\"\n#endif\n#endif  // QUADROTOR_DYNAMICS_CUH_\n"
  },
  {
    "path": "include/mppi/dynamics/racer_dubins/racer_dubins.cu",
    "content": "#include <mppi/dynamics/racer_dubins/racer_dubins.cuh>\n#include <mppi/utils/math_utils.h>\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid RacerDubinsImpl<CLASS_T, PARAMS_T>::computeDynamics(const Eigen::Ref<const state_array>& state,\n                                                         const Eigen::Ref<const control_array>& control,\n                                                         Eigen::Ref<state_array> state_der)\n{\n  bool enable_brake = control(C_INDEX(THROTTLE_BRAKE)) < 0;\n\n  state_der(S_INDEX(BRAKE_STATE)) =\n      fminf(fmaxf((enable_brake * -control(C_INDEX(THROTTLE_BRAKE)) - state(S_INDEX(BRAKE_STATE))) *\n                      this->params_.brake_delay_constant,\n                  -this->params_.max_brake_rate_neg),\n            this->params_.max_brake_rate_pos);\n  // applying position throttle\n  state_der(S_INDEX(VEL_X)) =\n      (!enable_brake) * this->params_.c_t[0] * control(C_INDEX(THROTTLE_BRAKE)) * this->params_.gear_sign +\n      this->params_.c_b[0] * state(S_INDEX(BRAKE_STATE)) * (state(S_INDEX(VEL_X)) >= 0 ? 
-1 : 1) -\n      this->params_.c_v[0] * state(S_INDEX(VEL_X)) + this->params_.c_0;\n  state_der(S_INDEX(YAW)) = (state(S_INDEX(VEL_X)) / this->params_.wheel_base) *\n                            tan(state(S_INDEX(STEER_ANGLE)) / this->params_.steer_angle_scale);\n  float yaw, sin_yaw, cos_yaw;\n  yaw = state[S_INDEX(YAW)];\n  sincosf(yaw, &sin_yaw, &cos_yaw);\n  state_der(S_INDEX(POS_X)) = state(S_INDEX(VEL_X)) * cos_yaw;\n  state_der(S_INDEX(POS_Y)) = state(S_INDEX(VEL_X)) * sin_yaw;\n  state_der(S_INDEX(STEER_ANGLE)) =\n      (control(C_INDEX(STEER_CMD)) * this->params_.steer_command_angle_scale - state(S_INDEX(STEER_ANGLE))) *\n      this->params_.steering_constant;\n  state_der(S_INDEX(STEER_ANGLE)) =\n      fmaxf(fminf(state_der(S_INDEX(STEER_ANGLE)), this->params_.max_steer_rate), -this->params_.max_steer_rate);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nbool RacerDubinsImpl<CLASS_T, PARAMS_T>::computeGrad(const Eigen::Ref<const state_array>& state,\n                                                     const Eigen::Ref<const control_array>& control, Eigen::Ref<dfdx> A,\n                                                     Eigen::Ref<dfdu> B)\n{\n  return false;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid RacerDubinsImpl<CLASS_T, PARAMS_T>::updateState(const Eigen::Ref<const state_array> state,\n                                                     Eigen::Ref<state_array> next_state,\n                                                     Eigen::Ref<state_array> state_der, const float dt)\n{\n  // Segmented it to ensure that roll and pitch don't get overwritten\n  for (int i = 0; i < 6; i++)\n  {\n    next_state[i] = state[i] + state_der[i] * dt;\n  }\n  next_state(S_INDEX(YAW)) = angle_utils::normalizeAngle(next_state(S_INDEX(YAW)));\n  next_state(S_INDEX(STEER_ANGLE)) =\n      fmaxf(fminf(next_state(S_INDEX(STEER_ANGLE)), this->params_.max_steer_angle), -this->params_.max_steer_angle);\n  next_state(S_INDEX(STEER_ANGLE_RATE)) = 
state_der(S_INDEX(STEER_ANGLE));\n  next_state(S_INDEX(BRAKE_STATE)) =\n      fminf(fmaxf(next_state(S_INDEX(BRAKE_STATE)), 0.0f), -this->control_rngs_[C_INDEX(THROTTLE_BRAKE)].x);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nRacerDubinsImpl<CLASS_T, PARAMS_T>::state_array RacerDubinsImpl<CLASS_T, PARAMS_T>::interpolateState(\n    const Eigen::Ref<state_array> state_1, const Eigen::Ref<state_array> state_2, const float alpha)\n{\n  state_array result = (1 - alpha) * state_1 + alpha * state_2;\n  result(S_INDEX(YAW)) = angle_utils::interpolateEulerAngleLinear(state_1(S_INDEX(YAW)), state_2(S_INDEX(YAW)), alpha);\n  return result;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void RacerDubinsImpl<CLASS_T, PARAMS_T>::updateState(float* state, float* next_state, float* state_der,\n                                                                const float dt)\n{\n  int i;\n  int tdy = threadIdx.y;\n  // Add the state derivative time dt to the current state.\n  // printf(\"updateState thread %d, %d = %f, %f\\n\", threadIdx.x, threadIdx.y, state[0], state_der[0]);\n  for (i = tdy; i < 6; i += blockDim.y)\n  {\n    next_state[i] = state[i] + state_der[i] * dt;\n    if (i == S_INDEX(YAW))\n    {\n      next_state[i] = angle_utils::normalizeAngle(next_state[i]);\n    }\n    if (i == S_INDEX(STEER_ANGLE))\n    {\n      next_state[i] = fmaxf(fminf(next_state[i], this->params_.max_steer_angle), -this->params_.max_steer_angle);\n      next_state[S_INDEX(STEER_ANGLE_RATE)] = state_der[i];\n    }\n    if (i == S_INDEX(BRAKE_STATE))\n    {\n      next_state[i] = fminf(fmaxf(next_state[i], 0.0f), 1.0f);\n    }\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nEigen::Quaternionf RacerDubinsImpl<CLASS_T, PARAMS_T>::attitudeFromState(const Eigen::Ref<const state_array>& state)\n{\n  float theta = state[S_INDEX(YAW)];\n  return Eigen::Quaternionf(cos(theta / 2), 0, 0, sin(theta / 2));\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nEigen::Vector3f 
RacerDubinsImpl<CLASS_T, PARAMS_T>::positionFromState(const Eigen::Ref<const state_array>& state)\n{\n  return Eigen::Vector3f(state[S_INDEX(POS_X)], state[S_INDEX(POS_Y)], 0);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nEigen::Vector3f RacerDubinsImpl<CLASS_T, PARAMS_T>::velocityFromState(const Eigen::Ref<const state_array>& state)\n{\n  return Eigen::Vector3f(state[S_INDEX(VEL_X)], 0, 0);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nEigen::Vector3f RacerDubinsImpl<CLASS_T, PARAMS_T>::angularRateFromState(const Eigen::Ref<const state_array>& state)\n{\n  return Eigen::Vector3f(0, 0, 0);  // TODO compute yaw rate from steering angle & vel\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nRacerDubinsImpl<CLASS_T, PARAMS_T>::state_array RacerDubinsImpl<CLASS_T, PARAMS_T>::stateFromOdometry(\n    const Eigen::Quaternionf& q, const Eigen::Vector3f& pos, const Eigen::Vector3f& vel, const Eigen::Vector3f& omega)\n{\n  state_array s;\n  s.setZero();\n  s[S_INDEX(POS_X)] = pos[0];\n  s[S_INDEX(POS_Y)] = pos[1];\n  s[S_INDEX(VEL_X)] = vel[0];\n  float _roll, _pitch, yaw;\n  mppi::math::Quat2EulerNWU(q, _roll, _pitch, yaw);\n  s[S_INDEX(YAW)] = yaw;\n  return s;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void RacerDubinsImpl<CLASS_T, PARAMS_T>::computeDynamics(float* state, float* control, float* state_der,\n                                                                    float* theta_s)\n{\n  bool enable_brake = control[C_INDEX(THROTTLE_BRAKE)] < 0;\n\n  state_der[S_INDEX(BRAKE_STATE)] =\n      fminf(fmaxf((enable_brake * -control[C_INDEX(THROTTLE_BRAKE)] - state[S_INDEX(BRAKE_STATE)]) *\n                      this->params_.brake_delay_constant,\n                  -this->params_.max_brake_rate_neg),\n            this->params_.max_brake_rate_pos);\n\n  // applying position throttle\n  state_der[S_INDEX(VEL_X)] =\n      (!enable_brake) * this->params_.c_t[0] * control[0] * this->params_.gear_sign +\n      this->params_.c_b[0] * 
state[S_INDEX(BRAKE_STATE)] * (state[S_INDEX(VEL_X)] >= 0 ? -1 : 1) -\n      this->params_.c_v[0] * state[S_INDEX(VEL_X)] + this->params_.c_0;\n  state_der[S_INDEX(YAW)] = (state[S_INDEX(VEL_X)] / this->params_.wheel_base) *\n                            tan(state[S_INDEX(STEER_ANGLE)] / this->params_.steer_angle_scale);\n  float yaw, sin_yaw, cos_yaw;\n  yaw = angle_utils::normalizeAngle(state[S_INDEX(YAW)]);\n  __sincosf(yaw, &sin_yaw, &cos_yaw);\n  state_der[S_INDEX(POS_X)] = state[S_INDEX(VEL_X)] * cos_yaw;\n  state_der[S_INDEX(POS_Y)] = state[S_INDEX(VEL_X)] * sin_yaw;\n  state_der[S_INDEX(STEER_ANGLE)] =\n      fmaxf(fminf((control[1] * this->params_.steer_command_angle_scale - state[S_INDEX(STEER_ANGLE)]) *\n                      this->params_.steering_constant,\n                  this->params_.max_steer_rate),\n            -this->params_.max_steer_rate);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid RacerDubinsImpl<CLASS_T, PARAMS_T>::getStoppingControl(const Eigen::Ref<const state_array>& state,\n                                                            Eigen::Ref<control_array> u)\n{\n  u[0] = -1.0;  // full brake\n  u[1] = 0.0;   // no steering\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid RacerDubinsImpl<CLASS_T, PARAMS_T>::enforceLeash(const Eigen::Ref<const state_array>& state_true,\n                                                      const Eigen::Ref<const state_array>& state_nominal,\n                                                      const Eigen::Ref<const state_array>& leash_values,\n                                                      Eigen::Ref<state_array> state_output)\n{\n  state_output = state_true;\n\n  // update state_output for leash, need to handle x and y positions specially, convert to body frame and leash in body\n  // frame. 
transform x and y into body frame\n  float dx = state_nominal[S_INDEX(POS_X)] - state_true[S_INDEX(POS_X)];\n  float dy = state_nominal[S_INDEX(POS_Y)] - state_true[S_INDEX(POS_Y)];\n  float dx_body = dx * cos(state_true[S_INDEX(YAW)]) + dy * sin(state_true[S_INDEX(YAW)]);\n  float dy_body = -dx * sin(state_true[S_INDEX(YAW)]) + dy * cos(state_true[S_INDEX(YAW)]);\n\n  // determine leash in body frame\n  float y_leash = leash_values[S_INDEX(POS_Y)];\n  float x_leash = leash_values[S_INDEX(POS_X)];\n  dx_body = fminf(fmaxf(dx_body, -x_leash), x_leash);\n  dy_body = fminf(fmaxf(dy_body, -y_leash), y_leash);\n\n  // transform back to map frame\n  dx = dx_body * cos(state_true[S_INDEX(YAW)]) + -dy_body * sin(state_true[S_INDEX(YAW)]);\n  dy = dx_body * sin(state_true[S_INDEX(YAW)]) + dy_body * cos(state_true[S_INDEX(YAW)]);\n\n  // apply leash\n  state_output[S_INDEX(POS_X)] += dx;\n  state_output[S_INDEX(POS_Y)] += dy;\n\n  // handle leash for rest of states\n  float diff;\n  for (int i = 0; i < PARENT_CLASS::STATE_DIM; i++)\n  {\n    // use body x and y for leash\n    if (i == S_INDEX(POS_X) || i == S_INDEX(POS_Y))\n    {\n      // handle outside for loop\n      continue;\n    }\n    else if (i == S_INDEX(YAW))\n    {\n      diff = angle_utils::shortestAngularDistance(state_true[i], state_nominal[i]);\n    }\n    else\n    {\n      diff = state_nominal[i] - state_true[i];\n    }\n\n    if (leash_values[i] < fabsf(diff))\n    {\n      float leash_dir = fminf(fmaxf(diff, -leash_values[i]), leash_values[i]);\n      state_output[i] = state_true[i] + leash_dir;\n      if (i == S_INDEX(YAW))\n      {\n        state_output[i] = angle_utils::normalizeAngle(state_output[i]);\n      }\n    }\n    else\n    {\n      state_output[i] = state_nominal[i];\n    }\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nRacerDubinsImpl<CLASS_T, PARAMS_T>::state_array\nRacerDubinsImpl<CLASS_T, PARAMS_T>::stateFromMap(const std::map<std::string, float>& map)\n{\n  state_array s = 
state_array::Zero();\n  if (map.find(\"VEL_X\") == map.end() || map.find(\"VEL_Y\") == map.end() || map.find(\"POS_X\") == map.end() ||\n      map.find(\"POS_Y\") == map.end())\n  {\n    return s;\n  }\n  s(S_INDEX(POS_X)) = map.at(\"POS_X\");\n  s(S_INDEX(POS_Y)) = map.at(\"POS_Y\");\n  s(S_INDEX(VEL_X)) = map.at(\"VEL_X\");\n  s(S_INDEX(YAW)) = map.at(\"YAW\");\n  if (map.find(\"STEER_ANGLE\") != map.end())\n  {\n    s(S_INDEX(STEER_ANGLE)) = map.at(\"STEER_ANGLE\");\n    s(S_INDEX(STEER_ANGLE_RATE)) = map.at(\"STEER_ANGLE_RATE\");\n  }\n  else\n  {\n    s(S_INDEX(STEER_ANGLE)) = 0;\n    s(S_INDEX(STEER_ANGLE_RATE)) = 0;\n  }\n  if (map.find(\"BRAKE_STATE\") != map.end())\n  {\n    s(S_INDEX(BRAKE_STATE)) = map.at(\"BRAKE_STATE\");\n  }\n  else if (map.find(\"BRAKE_CMD\") != map.end())\n  {\n    s(S_INDEX(BRAKE_STATE)) = map.at(\"BRAKE_CMD\");\n  }\n  else\n  {\n    s(S_INDEX(BRAKE_STATE)) = 0;\n  }\n\n  return s;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void RacerDubinsImpl<CLASS_T, PARAMS_T>::computeParametricDelayDeriv(float* state, float* control,\n                                                                                float* state_der, PARAMS_T* params_p)\n{\n  bool enable_brake = control[C_INDEX(THROTTLE_BRAKE)] < 0.0f;\n  const float brake_error = (enable_brake * -control[C_INDEX(THROTTLE_BRAKE)] - state[S_INDEX(BRAKE_STATE)]);\n\n  // Compute dynamics\n  state_der[S_INDEX(BRAKE_STATE)] =\n      fminf(fmaxf((brake_error > 0) * brake_error * params_p->brake_delay_constant +\n                      (brake_error < 0) * brake_error * params_p->brake_delay_constant_neg,\n                  -params_p->max_brake_rate_neg),\n            params_p->max_brake_rate_pos);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void RacerDubinsImpl<CLASS_T, PARAMS_T>::computeParametricSteerDeriv(float* state, float* control,\n                                                                                float* state_der, PARAMS_T* 
params_p)\n{\n  state_der[S_INDEX(STEER_ANGLE)] =\n      fmaxf(fminf((control[C_INDEX(STEER_CMD)] * params_p->steer_command_angle_scale - state[S_INDEX(STEER_ANGLE)]) *\n                      params_p->steering_constant,\n                  params_p->max_steer_rate),\n            -params_p->max_steer_rate);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid RacerDubinsImpl<CLASS_T, PARAMS_T>::computeParametricDelayDeriv(const Eigen::Ref<const state_array>& state,\n                                                                     const Eigen::Ref<const control_array>& control,\n                                                                     Eigen::Ref<state_array> state_der)\n{\n  bool enable_brake = control(C_INDEX(THROTTLE_BRAKE)) < 0.0f;\n  const float brake_error = (enable_brake * -control(C_INDEX(THROTTLE_BRAKE)) - state(S_INDEX(BRAKE_STATE)));\n\n  state_der(S_INDEX(BRAKE_STATE)) =\n      fminf(fmaxf((brake_error > 0) * brake_error * this->params_.brake_delay_constant +\n                      (brake_error < 0) * brake_error * this->params_.brake_delay_constant_neg,\n                  -this->params_.max_brake_rate_neg),\n            this->params_.max_brake_rate_pos);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid RacerDubinsImpl<CLASS_T, PARAMS_T>::computeParametricSteerDeriv(const Eigen::Ref<const state_array>& state,\n                                                                     const Eigen::Ref<const control_array>& control,\n                                                                     Eigen::Ref<state_array> state_der)\n{\n  state_der(S_INDEX(STEER_ANGLE)) =\n      (control(C_INDEX(STEER_CMD)) * this->params_.steer_command_angle_scale - state(S_INDEX(STEER_ANGLE))) *\n      this->params_.steering_constant;\n  state_der(S_INDEX(STEER_ANGLE)) =\n      fmaxf(fminf(state_der(S_INDEX(STEER_ANGLE)), this->params_.max_steer_rate), -this->params_.max_steer_rate);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ void 
RacerDubinsImpl<CLASS_T, PARAMS_T>::setOutputs(const float* state_der, const float* next_state,\n                                                                        float* output)\n{\n  // Setup output\n  output[O_INDEX(BASELINK_VEL_B_X)] = next_state[S_INDEX(VEL_X)];\n  output[O_INDEX(BASELINK_VEL_B_Y)] = 0.0f;\n  output[O_INDEX(BASELINK_VEL_B_Z)] = 0.0f;\n  output[O_INDEX(BASELINK_POS_I_X)] = next_state[S_INDEX(POS_X)];\n  output[O_INDEX(BASELINK_POS_I_Y)] = next_state[S_INDEX(POS_Y)];\n  output[O_INDEX(PITCH)] = next_state[S_INDEX(PITCH)];\n  output[O_INDEX(ROLL)] = next_state[S_INDEX(ROLL)];\n  output[O_INDEX(YAW)] = next_state[S_INDEX(YAW)];\n  output[O_INDEX(STEER_ANGLE)] = next_state[S_INDEX(STEER_ANGLE)];\n  output[O_INDEX(STEER_ANGLE_RATE)] = next_state[S_INDEX(STEER_ANGLE_RATE)];\n  output[O_INDEX(WHEEL_FORCE_B_FL)] = 10000.0f;\n  output[O_INDEX(WHEEL_FORCE_B_FR)] = 10000.0f;\n  output[O_INDEX(WHEEL_FORCE_B_RL)] = 10000.0f;\n  output[O_INDEX(WHEEL_FORCE_B_RR)] = 10000.0f;\n  output[O_INDEX(ACCEL_X)] = state_der[S_INDEX(VEL_X)];\n  output[O_INDEX(ACCEL_Y)] = 0.0f;\n  output[O_INDEX(OMEGA_Z)] = state_der[S_INDEX(YAW)];\n  output[O_INDEX(TOTAL_VELOCITY)] = fabsf(next_state[S_INDEX(VEL_X)]);\n}\n\ntemplate <class OUTPUT_T, class TEX_T>\n__device__ __host__ void RACER::computeStaticSettling(TEX_T* tex_helper, const float yaw, const float x, const float y,\n                                                      float& roll, float& pitch, float* output)\n{\n  float height = 0.0f;\n\n  float3 body_front_left = make_float3(2.981f, 0.737f, 0.0f);\n  float3 body_front_right = make_float3(2.981f, -0.737f, 0.f);\n  float3 body_rear_left = make_float3(0.0f, 0.737f, 0.0f);\n  float3 body_rear_right = make_float3(0.0f, -0.737f, 0.0f);\n  float3 body_pose = make_float3(x, y, 0.0f);\n  float3 rotation = make_float3(roll, pitch, yaw);\n  float3 front_left, front_right, rear_left, rear_right;\n  mppi::math::bodyOffsetToWorldPoseEuler(body_front_left, body_pose, rotation, 
front_left);\n  mppi::math::bodyOffsetToWorldPoseEuler(body_front_right, body_pose, rotation, front_right);\n  mppi::math::bodyOffsetToWorldPoseEuler(body_rear_left, body_pose, rotation, rear_left);\n  mppi::math::bodyOffsetToWorldPoseEuler(body_rear_right, body_pose, rotation, rear_right);\n  // front_left = make_float3(front_left.x * cosf(yaw) - front_left.y * sinf(yaw) + x,\n  //                          front_left.x * sinf(yaw) + front_left.y * cosf(yaw) + y, 0.0f);\n  // front_right = make_float3(front_right.x * cosf(yaw) - front_right.y * sinf(yaw) + x,\n  //                           front_right.x * sinf(yaw) + front_right.y * cosf(yaw) + y, 0.0f);\n  // rear_left = make_float3(rear_left.x * cosf(yaw) - rear_left.y * sinf(yaw) + x,\n  //                         rear_left.x * sinf(yaw) + rear_left.y * cosf(yaw) + y, 0.0f);\n  // rear_right = make_float3(rear_right.x * cosf(yaw) - rear_right.y * sinf(yaw) + x,\n  //                          rear_right.x * sinf(yaw) + rear_right.y * cosf(yaw) + y, 0.0f);\n  float front_left_height = 0.0f;\n  float front_right_height = 0.0f;\n  float rear_left_height = 0.0f;\n  float rear_right_height = 0.0f;\n\n  if (tex_helper->checkTextureUse(0))\n  {\n    front_left_height = tex_helper->queryTextureAtWorldPose(0, front_left);\n    front_right_height = tex_helper->queryTextureAtWorldPose(0, front_right);\n    rear_left_height = tex_helper->queryTextureAtWorldPose(0, rear_left);\n    rear_right_height = tex_helper->queryTextureAtWorldPose(0, rear_right);\n\n    float front_diff = front_left_height - front_right_height;\n    front_diff = fmaxf(fminf(front_diff, 0.736f * 2.0f), -0.736f * 2.0f);\n    float rear_diff = rear_left_height - rear_right_height;\n    rear_diff = fmaxf(fminf(rear_diff, 0.736f * 2.0f), -0.736f * 2.0f);\n    float front_roll = asinf(front_diff / (0.737f * 2.0f));\n    float rear_roll = asinf(rear_diff / (0.737f * 2.0f));\n    roll = (front_roll + rear_roll) / 2.0f;\n\n    float left_diff = rear_left_height 
- front_left_height;\n    left_diff = fmaxf(fminf(left_diff, 2.98f), -2.98f);\n    float right_diff = rear_right_height - front_right_height;\n    right_diff = fmaxf(fminf(right_diff, 2.98f), -2.98f);\n    float left_pitch = asinf((left_diff) / 2.981f);\n    float right_pitch = asinf((right_diff) / 2.981f);\n    pitch = (left_pitch + right_pitch) / 2.0f;\n\n    height = (rear_left_height + rear_right_height) / 2.0f;\n\n    // using 2pi so any rotation that accidently uses this will be using identity\n    if (!isfinite(roll) || fabsf(roll) > static_cast<float>(M_PI))\n    {\n      roll = static_cast<float>(2.0 * M_PI);\n    }\n    if (!isfinite(pitch) || fabsf(pitch) > static_cast<float>(M_PI))\n    {\n      pitch = static_cast<float>(2.0 * M_PI);\n    }\n    if (!isfinite(height))\n    {\n      height = 0.0f;\n    }\n  }\n  else\n  {\n    roll = 0.0f;\n    pitch = 0.0f;\n    height = 0.0f;\n  }\n  output[E_INDEX(OUTPUT_T, BASELINK_POS_I_Z)] = height;\n}\n"
  },
  {
    "path": "include/mppi/dynamics/racer_dubins/racer_dubins.cuh",
    "content": "#ifndef MPPIGENERIC_RACER_DUBINS_CUH\n#define MPPIGENERIC_RACER_DUBINS_CUH\n\n#include <mppi/dynamics/dynamics.cuh>\n#include <mppi/utils/angle_utils.cuh>\n\nnamespace RACER\n{\ntemplate <class OUTPUT_T, class TEX_T>\n__device__ __host__ static void computeStaticSettling(TEX_T* tex_helper, const float yaw, const float x, const float y,\n                                                      float& roll, float& pitch, float* output);\n};\n\nstruct RacerDubinsParams : public DynamicsParams\n{\n  enum class StateIndex : int\n  {\n    VEL_X = 0,\n    YAW,\n    POS_X,\n    POS_Y,\n    STEER_ANGLE,\n    BRAKE_STATE,\n    STEER_ANGLE_RATE,\n    NUM_STATES\n  };\n\n  enum class ControlIndex : int\n  {\n    THROTTLE_BRAKE = 0,\n    STEER_CMD,\n    NUM_CONTROLS\n  };\n\n  enum class OutputIndex : int\n  {\n    BASELINK_VEL_B_X = 0,\n    BASELINK_VEL_B_Y,\n    BASELINK_POS_I_X,\n    BASELINK_POS_I_Y,\n    BASELINK_POS_I_Z,\n    YAW,\n    ROLL,\n    PITCH,\n    STEER_ANGLE,\n    STEER_ANGLE_RATE,\n    WHEEL_FORCE_UP_MAX,\n    WHEEL_FORCE_FWD_MAX,\n    WHEEL_FORCE_SIDE_MAX,\n    // WHEEL_FORCE_UP_FL,\n    // WHEEL_FORCE_UP_FR,\n    // WHEEL_FORCE_UP_RL,\n    // WHEEL_FORCE_UP_RR,\n    // WHEEL_FORCE_FWD_FL,\n    // WHEEL_FORCE_FWD_FR,\n    // WHEEL_FORCE_FWD_RL,\n    // WHEEL_FORCE_FWD_RR,\n    // WHEEL_FORCE_SIDE_FL,\n    // WHEEL_FORCE_SIDE_FR,\n    // WHEEL_FORCE_SIDE_RL,\n    // WHEEL_FORCE_SIDE_RR,\n    ACCEL_X,\n    ACCEL_Y,\n    OMEGA_Z,\n    TOTAL_VELOCITY,\n    UNCERTAINTY_POS_X,\n    UNCERTAINTY_POS_Y,\n    UNCERTAINTY_YAW,\n    UNCERTAINTY_VEL_X,\n    UNCERTAINTY_POS_X_Y,\n    UNCERTAINTY_POS_X_YAW,\n    UNCERTAINTY_POS_X_VEL_X,\n    UNCERTAINTY_POS_Y_YAW,\n    UNCERTAINTY_POS_Y_VEL_X,\n    UNCERTAINTY_YAW_VEL_X,\n    FILLER_1,\n    NUM_OUTPUTS\n  };\n\n  // engine model component\n  float c_t[3] = { 1.3, 2.6, 3.9 };\n  float c_b[3] = { 2.5, 3.5, 4.5 };\n  float c_v[3] = { 3.7, 4.7, 5.7 };\n  float c_0 = 4.9;\n  // steering component\n  float 
steering_constant = .6;\n  float steer_command_angle_scale = 5;\n  float steer_angle_scale = -9.1;\n  float max_steer_angle = 0.5;\n  float max_steer_rate = 5;\n  float steer_accel_constant = 12.1f;\n  float steer_accel_drag_constant = 1.0f;\n  // brake parametric component\n  float brake_delay_constant = 6.6;\n  float brake_delay_constant_neg = 8.2;\n  float max_brake_rate_neg = 0.9;\n  float max_brake_rate_pos = 0.33;\n  // system parameters\n  float wheel_base = 0.3;\n  float low_min_throttle = 0.13;\n  float gravity = -9.81;\n  int gear_sign = 1;\n};\n\nusing namespace MPPI_internal;\n\ntemplate <class CLASS_T, class PARAMS_T = RacerDubinsParams>\nclass RacerDubinsImpl : public Dynamics<CLASS_T, PARAMS_T>\n{\npublic:\n  typedef Dynamics<CLASS_T, PARAMS_T> PARENT_CLASS;\n  typedef typename PARENT_CLASS::state_array state_array;\n  typedef typename PARENT_CLASS::control_array control_array;\n  typedef typename PARENT_CLASS::output_array output_array;\n  typedef typename PARENT_CLASS::dfdx dfdx;\n  typedef typename PARENT_CLASS::dfdu dfdu;\n  using PARENT_CLASS::updateState;  // needed as overloading updateState here hides all parent versions of updateState\n\n  RacerDubinsImpl(cudaStream_t stream = nullptr) : PARENT_CLASS(stream)\n  {\n  }\n\n  RacerDubinsImpl(PARAMS_T& params, cudaStream_t stream = nullptr) : PARENT_CLASS(params, stream)\n  {\n  }\n  std::string getDynamicsModelName() const override\n  {\n    return \"RACER Dubins Model\";\n  }\n\n  void computeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                       Eigen::Ref<state_array> state_der);\n\n  // void computeStateDeriv(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n  //                         Eigen::Ref<state_array> state_der, output_array* output=nullptr); // TODO\n\n  void computeParametricDelayDeriv(const Eigen::Ref<const state_array>& state,\n                                   
const Eigen::Ref<const control_array>& control, Eigen::Ref<state_array> state_der);\n\n  void computeParametricSteerDeriv(const Eigen::Ref<const state_array>& state,\n                                   const Eigen::Ref<const control_array>& control, Eigen::Ref<state_array> state_der);\n\n  __device__ void computeParametricDelayDeriv(float* state, float* control, float* state_der, PARAMS_T* params_p);\n\n  __device__ void computeParametricSteerDeriv(float* state, float* control, float* state_der, PARAMS_T* params_p);\n\n  void updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                   Eigen::Ref<state_array> state_der, const float dt);\n\n  __device__ void updateState(float* state, float* next_state, float* state_der, const float dt);\n\n  state_array interpolateState(const Eigen::Ref<state_array> state_1, const Eigen::Ref<state_array> state_2,\n                               const float alpha);\n\n  bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B);\n\n  __device__ void computeDynamics(float* state, float* control, float* state_der, float* theta = nullptr);\n\n  __host__ __device__ void setOutputs(const float* state_der, const float* next_state, float* output);\n\n  // __device__ void computeStateDeriv(float* state, float* control, float* state_der, float* theta_s, float\n  // *output=nullptr); // TODO\n\n  void getStoppingControl(const Eigen::Ref<const state_array>& state, Eigen::Ref<control_array> u);\n\n  Eigen::Quaternionf attitudeFromState(const Eigen::Ref<const state_array>& state);\n  Eigen::Vector3f positionFromState(const Eigen::Ref<const state_array>& state);\n  Eigen::Vector3f velocityFromState(const Eigen::Ref<const state_array>& state);\n  Eigen::Vector3f angularRateFromState(const Eigen::Ref<const state_array>& state);\n  state_array stateFromOdometry(const Eigen::Quaternionf& q, 
const Eigen::Vector3f& pos, const Eigen::Vector3f& vel,\n                                const Eigen::Vector3f& omega);\n\n  void enforceLeash(const Eigen::Ref<const state_array>& state_true, const Eigen::Ref<const state_array>& state_nominal,\n                    const Eigen::Ref<const state_array>& leash_values, Eigen::Ref<state_array> state_output);\n\n  state_array stateFromMap(const std::map<std::string, float>& map) override;\n};\n\nclass RacerDubins : public RacerDubinsImpl<RacerDubins>\n{\npublic:\n  RacerDubins(cudaStream_t stream = nullptr) : RacerDubinsImpl<RacerDubins>(stream)\n  {\n  }\n\n  RacerDubins(RacerDubinsParams& params, cudaStream_t stream = nullptr) : RacerDubinsImpl<RacerDubins>(params, stream)\n  {\n  }\n};\n\n#if __CUDACC__\n#include \"racer_dubins.cu\"\n#endif\n\n#endif  // MPPIGENERIC_RACER_DUBINS_CUH\n"
  },
  {
    "path": "include/mppi/dynamics/racer_dubins/racer_dubins_elevation.cu",
    "content": "#include <mppi/dynamics/racer_dubins/racer_dubins_elevation.cuh>\n#include <mppi/utils/math_utils.h>\n\nnamespace mm = mppi::matrix_multiplication;\nnamespace mp1 = mppi::p1;\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::GPUSetup()\n{\n  PARENT_CLASS::GPUSetup();\n  tex_helper_->GPUSetup();\n  // makes sure that the device ptr sees the correct texture object\n  HANDLE_ERROR(cudaMemcpyAsync(&(this->model_d_->tex_helper_), &(tex_helper_->ptr_d_),\n                               sizeof(TwoDTextureHelper<float>*), cudaMemcpyHostToDevice, this->stream_));\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::freeCudaMem()\n{\n  tex_helper_->freeCudaMem();\n  PARENT_CLASS::freeCudaMem();\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::paramsToDevice()\n{\n  // does all the internal texture updates\n  tex_helper_->copyToDevice();\n  PARENT_CLASS::paramsToDevice();\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::computeParametricAccelDeriv(\n    const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n    Eigen::Ref<state_array> state_der, const float dt)\n{\n  float linear_brake_slope = 0.2f;\n  bool enable_brake = control(C_INDEX(THROTTLE_BRAKE)) < 0.0f;\n  int index = (abs(state(S_INDEX(VEL_X))) > linear_brake_slope && abs(state(S_INDEX(VEL_X))) <= 3.0f) +\n              (abs(state(S_INDEX(VEL_X))) > 3.0f) * 2;\n\n  const float brake_state = fmin(fmax(state(S_INDEX(BRAKE_STATE)), 0.0f), 0.25f);\n\n  // applying position throttle\n  float throttle = this->params_.c_t[index] * control(C_INDEX(THROTTLE_BRAKE));\n  float brake = this->params_.c_b[index] * brake_state * (state(S_INDEX(VEL_X)) >= 0.0f ? 
-1.0f : 1.0f);\n  if (abs(state(S_INDEX(VEL_X))) <= linear_brake_slope)\n  {\n    throttle = this->params_.c_t[index] * max(control(C_INDEX(THROTTLE_BRAKE)) - this->params_.low_min_throttle, 0.0f);\n    brake = this->params_.c_b[index] * brake_state * -state(S_INDEX(VEL_X));\n  }\n\n  state_der(S_INDEX(VEL_X)) = (!enable_brake) * throttle * this->params_.gear_sign + brake -\n                              this->params_.c_v[index] * state(S_INDEX(VEL_X)) + this->params_.c_0;\n  state_der(S_INDEX(VEL_X)) = min(max(state_der(S_INDEX(VEL_X)), -this->params_.clamp_ax), this->params_.clamp_ax);\n  if (fabsf(state[S_INDEX(PITCH)]) < static_cast<float>(M_PI_2))\n  {\n    state_der[S_INDEX(VEL_X)] -= this->params_.gravity * sinf(state[S_INDEX(PITCH)]);\n  }\n  state_der(S_INDEX(YAW)) = (state(S_INDEX(VEL_X)) / this->params_.wheel_base) *\n                            tanf(state(S_INDEX(STEER_ANGLE)) / this->params_.steer_angle_scale);\n  float yaw, sin_yaw, cos_yaw;\n  yaw = state[S_INDEX(YAW)];\n  sincosf(yaw, &sin_yaw, &cos_yaw);\n  state_der(S_INDEX(POS_X)) = state(S_INDEX(VEL_X)) * cos_yaw;\n  state_der(S_INDEX(POS_Y)) = state(S_INDEX(VEL_X)) * sin_yaw;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ void RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::setOutputs(const float* state_der,\n                                                                                 const float* next_state, float* output)\n{\n  // Setup output\n  // output[O_INDEX(BASELINK_VEL_B_X)] = next_state[S_INDEX(VEL_X)];\n  // output[O_INDEX(BASELINK_VEL_B_Y)] = 0.0f;\n  // output[O_INDEX(BASELINK_VEL_B_Z)] = 0.0f;\n  // output[O_INDEX(BASELINK_POS_I_X)] = next_state[S_INDEX(POS_X)];\n  // output[O_INDEX(BASELINK_POS_I_Y)] = next_state[S_INDEX(POS_Y)];\n  // output[O_INDEX(PITCH)] = next_state[S_INDEX(PITCH)];\n  // output[O_INDEX(ROLL)] = next_state[S_INDEX(ROLL)];\n  // output[O_INDEX(YAW)] = next_state[S_INDEX(YAW)];\n  // output[O_INDEX(STEER_ANGLE)] = 
next_state[S_INDEX(STEER_ANGLE)];\n  // output[O_INDEX(STEER_ANGLE_RATE)] = next_state[S_INDEX(STEER_ANGLE_RATE)];\n  // output[O_INDEX(WHEEL_FORCE_B_FL)] = 10000.0f;\n  // output[O_INDEX(WHEEL_FORCE_B_FR)] = 10000.0f;\n  // output[O_INDEX(WHEEL_FORCE_B_RL)] = 10000.0f;\n  // output[O_INDEX(WHEEL_FORCE_B_RR)] = 10000.0f;\n  // output[O_INDEX(ACCEL_X)] = state_der[S_INDEX(VEL_X)];\n  // output[O_INDEX(ACCEL_Y)] = 0.0f;\n  // output[O_INDEX(OMEGA_Z)] = state_der[S_INDEX(YAW)];\n  // output[O_INDEX(UNCERTAINTY_VEL_X)] = next_state[S_INDEX(UNCERTAINTY_VEL_X)];\n  // output[O_INDEX(UNCERTAINTY_YAW_VEL_X)] = next_state[S_INDEX(UNCERTAINTY_YAW_VEL_X)];\n  // output[O_INDEX(UNCERTAINTY_POS_X_VEL_X)] = next_state[S_INDEX(UNCERTAINTY_POS_X_VEL_X)];\n  // output[O_INDEX(UNCERTAINTY_POS_Y_VEL_X)] = next_state[S_INDEX(UNCERTAINTY_POS_Y_VEL_X)];\n  // output[O_INDEX(UNCERTAINTY_YAW)] = next_state[S_INDEX(UNCERTAINTY_YAW)];\n  // output[O_INDEX(UNCERTAINTY_POS_X_YAW)] = next_state[S_INDEX(UNCERTAINTY_POS_X_YAW)];\n  // output[O_INDEX(UNCERTAINTY_POS_Y_YAW)] = next_state[S_INDEX(UNCERTAINTY_POS_Y_YAW)];\n  // output[O_INDEX(UNCERTAINTY_POS_X)] = next_state[S_INDEX(UNCERTAINTY_POS_X)];\n  // output[O_INDEX(UNCERTAINTY_POS_X_Y)] = next_state[S_INDEX(UNCERTAINTY_POS_X_Y)];\n  // output[O_INDEX(UNCERTAINTY_POS_Y)] = next_state[S_INDEX(UNCERTAINTY_POS_Y)];\n\n  int step, pi;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_Y>(pi, step);\n  for (int i = pi; i < this->OUTPUT_DIM; i += step)\n  {\n    switch (i)\n    {\n      case O_INDEX(BASELINK_VEL_B_X):\n        output[i] = next_state[S_INDEX(VEL_X)];\n        break;\n      case O_INDEX(BASELINK_VEL_B_Y):\n        output[i] = 0.0f;\n        break;\n      // case O_INDEX(BASELINK_VEL_B_Z):\n      //   output[i] = 0.0f;\n      //   break;\n      case O_INDEX(BASELINK_POS_I_X):\n        output[i] = next_state[S_INDEX(POS_X)];\n        break;\n      case O_INDEX(BASELINK_POS_I_Y):\n        output[i] = next_state[S_INDEX(POS_Y)];\n     
   break;\n      case O_INDEX(PITCH):\n        output[i] = next_state[S_INDEX(PITCH)];\n        break;\n      case O_INDEX(ROLL):\n        output[i] = next_state[S_INDEX(ROLL)];\n        break;\n      case O_INDEX(YAW):\n        output[i] = next_state[S_INDEX(YAW)];\n        break;\n      case O_INDEX(STEER_ANGLE):\n        output[i] = next_state[S_INDEX(STEER_ANGLE)];\n        break;\n      case O_INDEX(STEER_ANGLE_RATE):\n        output[i] = next_state[S_INDEX(STEER_ANGLE_RATE)];\n        break;\n      case O_INDEX(WHEEL_FORCE_UP_MAX):\n        output[i] = NAN;\n        break;\n      case O_INDEX(WHEEL_FORCE_FWD_MAX):\n        output[i] = NAN;\n        break;\n      case O_INDEX(WHEEL_FORCE_SIDE_MAX):\n        output[i] = NAN;\n        break;\n      // case O_INDEX(WHEEL_FORCE_UP_FL):\n      //   output[i] = NAN;\n      //   break;\n      // case O_INDEX(WHEEL_FORCE_UP_FR):\n      //   output[i] = NAN;\n      //   break;\n      // case O_INDEX(WHEEL_FORCE_UP_RL):\n      //   output[i] = NAN;\n      //   break;\n      // case O_INDEX(WHEEL_FORCE_UP_RR):\n      //   output[i] = NAN;\n      //   break;\n      // case O_INDEX(WHEEL_FORCE_FWD_FL):\n      //   output[i] = NAN;\n      //   break;\n      // case O_INDEX(WHEEL_FORCE_FWD_FR):\n      //   output[i] = NAN;\n      //   break;\n      // case O_INDEX(WHEEL_FORCE_FWD_RL):\n      //   output[i] = NAN;\n      //   break;\n      // case O_INDEX(WHEEL_FORCE_FWD_RR):\n      //   output[i] = NAN;\n      //   break;\n      // case O_INDEX(WHEEL_FORCE_SIDE_FL):\n      //   output[i] = NAN;\n      //   break;\n      // case O_INDEX(WHEEL_FORCE_SIDE_FR):\n      //   output[i] = NAN;\n      //   break;\n      // case O_INDEX(WHEEL_FORCE_SIDE_RL):\n      //   output[i] = NAN;\n      //   break;\n      // case O_INDEX(WHEEL_FORCE_SIDE_RR):\n      //   output[i] = NAN;\n      //   break;\n      case O_INDEX(ACCEL_X):\n        output[i] = state_der[S_INDEX(VEL_X)];\n        break;\n      case O_INDEX(ACCEL_Y):\n        
output[i] = 0.0f;\n        break;\n      case O_INDEX(OMEGA_Z):\n        output[i] = state_der[S_INDEX(YAW)];\n        break;\n      case O_INDEX(UNCERTAINTY_VEL_X):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_VEL_X)];\n        break;\n      case O_INDEX(UNCERTAINTY_YAW_VEL_X):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_YAW_VEL_X)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X_VEL_X):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_X_VEL_X)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_Y_VEL_X):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_Y_VEL_X)];\n        break;\n      case O_INDEX(UNCERTAINTY_YAW):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_YAW)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X_YAW):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_X_YAW)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_Y_YAW):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_Y_YAW)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_X)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X_Y):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_X_Y)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_Y):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_Y)];\n        break;\n      case O_INDEX(TOTAL_VELOCITY):\n        output[i] = fabsf(next_state[S_INDEX(VEL_X)]);\n        break;\n    }\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nvoid RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::step(Eigen::Ref<state_array> state,\n                                                       Eigen::Ref<state_array> next_state,\n                                                       Eigen::Ref<state_array> state_der,\n                                                       const Eigen::Ref<const control_array>& control,\n                                                       Eigen::Ref<output_array> output, const float t, const 
float dt)\n{\n  this->computeParametricDelayDeriv(state, control, state_der);\n  this->computeParametricSteerDeriv(state, control, state_der);\n  computeParametricAccelDeriv(state, control, state_der, dt);\n  SharedBlock sb;\n\n  // Integrate using racer_dubins updateState\n  this->PARENT_CLASS::updateState(state, next_state, state_der, dt);\n\n  computeUncertaintyPropagation(state.data(), control.data(), state_der.data(), next_state.data(), dt, &this->params_,\n                                &sb, nullptr);\n  float roll = state(S_INDEX(ROLL));\n  float pitch = state(S_INDEX(PITCH));\n  RACER::computeStaticSettling<typename DYN_PARAMS_T::OutputIndex, TwoDTextureHelper<float>>(\n      this->tex_helper_, next_state(S_INDEX(YAW)), next_state(S_INDEX(POS_X)), next_state(S_INDEX(POS_Y)), roll, pitch,\n      output.data());\n  next_state[S_INDEX(PITCH)] = pitch;\n  next_state[S_INDEX(ROLL)] = roll;\n\n  this->setOutputs(state_der.data(), next_state.data(), output.data());\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nbool RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::computeGrad(const Eigen::Ref<const state_array>& state,\n                                                              const Eigen::Ref<const control_array>& control,\n                                                              Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B)\n{\n  A = dfdx::Zero();\n  B = dfdu::Zero();\n\n  float eps = 0.01f;\n  bool enable_brake = control(C_INDEX(THROTTLE_BRAKE)) < 0.0f;\n\n  // vx\n  float linear_brake_slope = 0.2f;\n  int index = (abs(state(S_INDEX(VEL_X))) > linear_brake_slope && abs(state(S_INDEX(VEL_X))) <= 3.0f) +\n              (abs(state(S_INDEX(VEL_X))) > 3.0f) * 2;\n\n  A(0, 0) = -this->params_.c_v[index];\n  if (abs(state(S_INDEX(VEL_X))) < linear_brake_slope)\n  {\n    A(0, 5) = this->params_.c_b[index] * -state(S_INDEX(VEL_X));\n  }\n  else\n  {\n    A(0, 5) = this->params_.c_b[index] * (state(S_INDEX(VEL_X)) >= 0.0f ? 
-1.0f : 1.0f);\n  }\n  // TODO zero out if we are above the threshold to match??\n\n  // yaw\n  A(1, 0) = (1.0f / this->params_.wheel_base) * tanf(state(S_INDEX(STEER_ANGLE)) / this->params_.steer_angle_scale);\n  A(1, 4) = (state(S_INDEX(VEL_X)) / this->params_.wheel_base) *\n            (1.0 / SQ(cosf(state(S_INDEX(STEER_ANGLE)) / this->params_.steer_angle_scale))) /\n            this->params_.steer_angle_scale;\n  // pos x\n  A(2, 0) = cosf(state(S_INDEX(YAW)));\n  A(2, 1) = -sinf(state(S_INDEX(YAW))) * state(S_INDEX(VEL_X));\n  // pos y\n  A(3, 0) = sinf(state(S_INDEX(YAW)));\n  A(3, 1) = cosf(state(S_INDEX(YAW))) * state(S_INDEX(VEL_X));\n  // steer angle\n  float steer_dot =\n      (control(C_INDEX(STEER_CMD)) * this->params_.steer_command_angle_scale - state(S_INDEX(STEER_ANGLE))) *\n      this->params_.steering_constant;\n  if (steer_dot - eps < -this->params_.max_steer_rate || steer_dot + eps > this->params_.max_steer_rate)\n  {\n    A(4, 4) = 0.0f;\n  }\n  else\n  {\n    A(4, 4) = -this->params_.steering_constant;\n  }\n  A(4, 4) = max(min(A(4, 4), this->params_.max_steer_rate), -this->params_.max_steer_rate);\n  // gravity\n  A(0, 7) = -this->params_.gravity * cosf(state(S_INDEX(PITCH)));\n\n  // brake delay\n  float brake_dot = (enable_brake * -control(C_INDEX(THROTTLE_BRAKE)) - state(S_INDEX(BRAKE_STATE))) *\n                    this->params_.brake_delay_constant;\n  if (brake_dot - eps < -this->params_.max_brake_rate_neg || brake_dot + eps > this->params_.max_brake_rate_pos)\n  {\n    A(5, 5) = 0.0f;\n  }\n  else\n  {\n    A(5, 5) = -this->params_.brake_delay_constant;\n  }\n\n  // steering command\n  B(4, 1) = this->params_.steer_command_angle_scale * this->params_.steering_constant;\n  // throttle command\n  B(0, 0) = this->params_.c_t[index] * this->params_.gear_sign * (!enable_brake);\n  // brake command\n  if ((state(S_INDEX(BRAKE_STATE)) < -this->control_rngs_[C_INDEX(THROTTLE_BRAKE)].x && brake_dot < 0.0f) ||\n      (state(S_INDEX(BRAKE_STATE)) 
> 0.0f && brake_dot > 0.0f))\n  {\n    B(5, 0) = -this->params_.brake_delay_constant * enable_brake;\n  }\n  return true;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ bool RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::computeUncertaintyJacobian(const float* state,\n                                                                                                 const float* control,\n                                                                                                 float* A,\n                                                                                                 DYN_PARAMS_T* params_p)\n{\n  float sin_yaw, cos_yaw, tan_steer_angle, cos_2_delta;\n#ifdef __CUDA_ARCH__\n  float yaw_norm = angle_utils::normalizeAngle(state[S_INDEX(YAW)]);\n  float delta = state[S_INDEX(STEER_ANGLE)] / params_p->steer_angle_scale;\n  __sincosf(yaw_norm, &sin_yaw, &cos_yaw);\n  tan_steer_angle = __tanf(delta);\n  cos_2_delta = SQ(__cosf(delta));\n#else\n  sincosf(state[S_INDEX(YAW)], &sin_yaw, &cos_yaw);\n  float delta = state[S_INDEX(STEER_ANGLE)] / params_p->steer_angle_scale;\n  tan_steer_angle = tanf(delta);\n  cos_2_delta = SQ(cosf(delta));\n#endif\n  // const float cos_2_delta = cos_yaw * cos_yaw;\n\n  // vx\n  float linear_brake_slope = 0.2f;\n  int index = (fabsf(state[S_INDEX(VEL_X)]) > linear_brake_slope && fabsf(state[S_INDEX(VEL_X)]) <= 3.0f) +\n              (fabsf(state[S_INDEX(VEL_X)]) > 3.0f) * 2;\n\n  int step, pi;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_Y>(pi, step);\n  const float brake_state = fmin(fmax(state[S_INDEX(BRAKE_STATE)], 0.0f), 0.25f);\n  // A = df/dx + df/du * K\n  for (int i = pi; i < UNCERTAINTY_DIM * UNCERTAINTY_DIM; i += step)\n  {\n    switch (i)\n    {\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        A[i] = -params_p->c_v[index] - params_p->K_vel_x - (index == 0 ? 
1.0f : 0.0f) * params_p->c_b[0] * brake_state;\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(YAW), UNCERTAINTY_DIM):\n        A[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        A[i] = -params_p->K_x * cos_yaw;\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        A[i] = -params_p->K_x * sin_yaw;\n        break;\n\n      // yaw\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        A[i] = tan_steer_angle / (params_p->wheel_base);\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(YAW), UNCERTAINTY_DIM):\n        A[i] = -fabsf(state[S_INDEX(VEL_X)]) * params_p->K_yaw / (params_p->wheel_base * cos_2_delta);\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        A[i] = state[S_INDEX(VEL_X)] * params_p->K_y * sin_yaw / (params_p->wheel_base * cos_2_delta);\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        A[i] = -state[S_INDEX(VEL_X)] * params_p->K_y * cos_yaw / (params_p->wheel_base * cos_2_delta);\n        break;\n      // pos x\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        A[i] = cos_yaw;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(YAW), UNCERTAINTY_DIM):\n        A[i] = -sin_yaw * state[S_INDEX(VEL_X)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        A[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        A[i] = 0.0f;\n        break;\n      // pos y\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        A[i] = sin_yaw;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(YAW), UNCERTAINTY_DIM):\n        
A[i] = cos_yaw * state[S_INDEX(VEL_X)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        A[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        A[i] = 0.0f;\n        break;\n    }\n  }\n  return true;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ bool RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::computeQ(const float* state, const float* control,\n                                                                               const float* state_der, float* Q,\n                                                                               DYN_PARAMS_T* params_p, float* theta_s)\n{\n  const float abs_vx = fabsf(state[S_INDEX(VEL_X)]);\n  const float abs_acc_x = fabsf(state_der[S_INDEX(VEL_X)]);\n  const float delta = state[S_INDEX(STEER_ANGLE)] / params_p->steer_angle_scale;\n\n  float sin_yaw, cos_yaw, tan_steer_angle, sin_roll;\n#ifdef __CUDA_ARCH__\n  const float yaw_norm = angle_utils::normalizeAngle(state[S_INDEX(YAW)]);\n  __sincosf(yaw_norm, &sin_yaw, &cos_yaw);\n  tan_steer_angle = __tanf(delta);\n  sin_roll = __sinf(angle_utils::normalizeAngle(state[S_INDEX(ROLL)]));\n#else\n  sincosf(state[S_INDEX(YAW)], &sin_yaw, &cos_yaw);\n  tan_steer_angle = tanf(delta);\n#endif\n  const float side_force = SQ(abs_vx) * tan_steer_angle / params_p->wheel_base + params_p->gravity * sin_roll;\n  // const float Q_11 = params_p->Q_y_f * side_force * side_force * abs_vx;\n  const float Q_11 = fabsf(params_p->Q_y_f * fabsf(side_force) * fmaxf(abs_vx - 2, 0.0f));\n\n  const float linear_brake_slope = 0.2f;\n  const int index = (fabsf(state[S_INDEX(VEL_X)]) > linear_brake_slope && fabsf(state[S_INDEX(VEL_X)]) <= 3.0f) +\n                    (fabsf(state[S_INDEX(VEL_X)]) > 3.0f) * 2;\n\n  int step, pi;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_Y>(pi, step);\n  for (int i = pi; i < UNCERTAINTY_DIM * UNCERTAINTY_DIM; i += step)\n 
 {\n    switch (i)\n    {\n      // vel_x\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        Q[i] = params_p->Q_x_acc * abs_acc_x + params_p->Q_x_v[index] * abs_vx;\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(YAW), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      // yaw\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(YAW), UNCERTAINTY_DIM):\n        Q[i] = abs_vx * (params_p->Q_omega_steering * fabsf(delta) + params_p->Q_omega_v);\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      // pos x\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(YAW), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        Q[i] = Q_11 * sin_yaw * sin_yaw;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        Q[i] = -Q_11 * sin_yaw * cos_yaw;\n        break;\n      // pos y\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(YAW), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), 
U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        Q[i] = Q_11 * cos_yaw * cos_yaw;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        Q[i] = -Q_11 * sin_yaw * cos_yaw;\n        break;\n    }\n  }\n  return true;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ void\nRacerDubinsElevationImpl<CLASS_T, PARAMS_T>::uncertaintyStateToMatrix(const float* state, float* uncertainty_matrix)\n{\n  int step, pi;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_Y>(pi, step);\n  for (int i = pi; i < UNCERTAINTY_DIM * UNCERTAINTY_DIM; i += step)\n  {\n    switch (i)\n    {\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_VEL_X)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_YAW_VEL_X)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_X_VEL_X)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_Y_VEL_X)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(YAW), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_YAW_VEL_X)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(YAW), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_YAW)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(YAW), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_X_YAW)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(YAW), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_Y_YAW)];\n        break;\n      case 
mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_X_VEL_X)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_X_YAW)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_X)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_X_Y)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_Y_VEL_X)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_Y_YAW)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_X_Y)];\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        uncertainty_matrix[i] = state[S_INDEX(UNCERTAINTY_POS_Y)];\n        break;\n    }\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ void\nRacerDubinsElevationImpl<CLASS_T, PARAMS_T>::uncertaintyMatrixToState(const float* uncertainty_matrix, float* state)\n{\n  int step, pi;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_Y>(pi, step);\n  for (int i = pi + S_INDEX(UNCERTAINTY_POS_X); i < this->STATE_DIM; i += step)\n  {\n    switch (i)\n    {\n      case S_INDEX(UNCERTAINTY_VEL_X):\n        state[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(VEL_X), UNCERTAINTY_DIM)];\n        break;\n      case S_INDEX(UNCERTAINTY_YAW_VEL_X):\n        state[i] = 
uncertainty_matrix[mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(VEL_X), UNCERTAINTY_DIM)];\n        break;\n      case S_INDEX(UNCERTAINTY_POS_X_VEL_X):\n        state[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(VEL_X), UNCERTAINTY_DIM)];\n        break;\n      case S_INDEX(UNCERTAINTY_POS_Y_VEL_X):\n        state[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(VEL_X), UNCERTAINTY_DIM)];\n        break;\n      case S_INDEX(UNCERTAINTY_YAW):\n        state[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(YAW), UNCERTAINTY_DIM)];\n        break;\n      case S_INDEX(UNCERTAINTY_POS_X_YAW):\n        state[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(YAW), UNCERTAINTY_DIM)];\n        break;\n      case S_INDEX(UNCERTAINTY_POS_Y_YAW):\n        state[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(YAW), UNCERTAINTY_DIM)];\n        break;\n      case S_INDEX(UNCERTAINTY_POS_X):\n        state[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_X), UNCERTAINTY_DIM)];\n        break;\n      case S_INDEX(UNCERTAINTY_POS_X_Y):\n        state[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_X), UNCERTAINTY_DIM)];\n        break;\n      case S_INDEX(UNCERTAINTY_POS_Y):\n        state[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_Y), UNCERTAINTY_DIM)];\n        break;\n      default:\n        break;\n    }\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ void\nRacerDubinsElevationImpl<CLASS_T, PARAMS_T>::uncertaintyMatrixToOutput(const float* uncertainty_matrix, float* output)\n{\n  int step, pi;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_Y>(pi, step);\n  for (int i = pi + O_INDEX(UNCERTAINTY_POS_X); i < this->OUTPUT_DIM; i += step)\n  {\n    switch (i)\n    {\n      case O_INDEX(UNCERTAINTY_VEL_X):\n        output[i] = 
uncertainty_matrix[mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(VEL_X), UNCERTAINTY_DIM)];\n        break;\n      case O_INDEX(UNCERTAINTY_YAW_VEL_X):\n        output[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(VEL_X), UNCERTAINTY_DIM)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X_VEL_X):\n        output[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(VEL_X), UNCERTAINTY_DIM)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_Y_VEL_X):\n        output[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(VEL_X), UNCERTAINTY_DIM)];\n        break;\n      case O_INDEX(UNCERTAINTY_YAW):\n        output[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(YAW), UNCERTAINTY_DIM)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X_YAW):\n        output[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(YAW), UNCERTAINTY_DIM)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_Y_YAW):\n        output[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(YAW), UNCERTAINTY_DIM)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X):\n        output[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_X), UNCERTAINTY_DIM)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X_Y):\n        output[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_X), UNCERTAINTY_DIM)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_Y):\n        output[i] = uncertainty_matrix[mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_Y), UNCERTAINTY_DIM)];\n        break;\n      default:\n        break;\n    }\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__host__ __device__ void RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::computeUncertaintyPropagation(\n    const float* state, const float* control, const float* state_der, float* next_state, float dt,\n    DYN_PARAMS_T* params_p, SharedBlock* uncertainty_data, float* 
theta_s)\n{\n  CLASS_T* derived = static_cast<CLASS_T*>(this);\n  int step, pi;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_Y>(pi, step);\n  // if (params_p->gear_sign == -1)\n  // {  // Set Uncertainty to zero in reverse\n  //   for (int i = pi; i < UNCERTAINTY_DIM * UNCERTAINTY_DIM; i += step)\n  //   {\n  //     uncertainty_data->Sigma_a[i] = 0.0f;\n  //   }\n  // #ifdef __CUDA_ARCH__\n  //   __syncthreads();\n  // #endif\n  //   derived->uncertaintyMatrixToState(uncertainty_data->Sigma_a, next_state);\n  // #ifdef __CUDA_ARCH__\n  //   __syncthreads();\n  // #endif\n  //   return;\n  // }\n  derived->computeUncertaintyJacobian(state, control, uncertainty_data->A, params_p);\n  derived->uncertaintyStateToMatrix(state, uncertainty_data->Sigma_a);\n#ifdef __CUDA_ARCH__\n  __syncthreads();  // TODO: Check if this syncthreads is even needed\n#endif\n  // Turn A into (I + A dt)\n  for (int i = pi; i < UNCERTAINTY_DIM * UNCERTAINTY_DIM; i += step)\n  {\n    uncertainty_data->A[i] = (i % (UNCERTAINTY_DIM + 1) == 0) + uncertainty_data->A[i] * dt;\n  }\n\n#ifdef __CUDA_ARCH__\n  __syncthreads();\n  mm::gemm1<UNCERTAINTY_DIM, UNCERTAINTY_DIM, UNCERTAINTY_DIM, mp1::Parallel1Dir::THREAD_Y>(\n      uncertainty_data->A, uncertainty_data->Sigma_a, uncertainty_data->Sigma_b, 1.0f, 0.0f, mm::MAT_OP::NONE,\n      mm::MAT_OP::NONE);\n  __syncthreads();\n  mm::gemm1<UNCERTAINTY_DIM, UNCERTAINTY_DIM, UNCERTAINTY_DIM, mp1::Parallel1Dir::THREAD_Y>(\n      uncertainty_data->Sigma_b, uncertainty_data->A, uncertainty_data->Sigma_a, 1.0f, 0.0f, mm::MAT_OP::NONE,\n      mm::MAT_OP::TRANSPOSE);\n  __syncthreads();\n#else\n  typedef Eigen::Matrix<float, UNCERTAINTY_DIM, UNCERTAINTY_DIM> eigen_uncertainty_matrx;\n  Eigen::Map<eigen_uncertainty_matrx> A_eigen(uncertainty_data->A);\n  Eigen::Map<eigen_uncertainty_matrx> Sigma_a_eigen(uncertainty_data->Sigma_a);\n  Eigen::Map<eigen_uncertainty_matrx> Sigma_b_eigen(uncertainty_data->Sigma_b);\n  Sigma_a_eigen = A_eigen * Sigma_a_eigen * 
A_eigen.transpose();\n#endif\n  // float Q[UNCERTAINTY_DIM * UNCERTAINTY_DIM] = {\n  //   1.0f, 0.0f, 0.0f, 0.0f,\n  //   0.0f, 0.01f, 0.0f, 0.0f,\n  //   0.0f, 0.0f, 1.0f, 0.0f,\n  //   0.0f, 0.0f, 0.0f, 0.25f,\n  // };\n  derived->computeQ(state, control, state_der, uncertainty_data->Sigma_b, params_p, theta_s);\n#ifdef __CUDA_ARCH__\n  __syncthreads();  // TODO: Check if this syncthreads is even needed\n#endif\n  for (int i = pi; i < UNCERTAINTY_DIM * UNCERTAINTY_DIM; i += step)\n  {\n    uncertainty_data->Sigma_a[i] += uncertainty_data->Sigma_b[i] * dt;\n  }\n#ifdef __CUDA_ARCH__\n  __syncthreads();\n#endif\n  derived->uncertaintyMatrixToState(uncertainty_data->Sigma_a, next_state);\n#ifdef __CUDA_ARCH__\n  __syncthreads();\n#endif\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::initializeDynamics(float* state, float* control,\n                                                                                float* output, float* theta_s,\n                                                                                float t_0, float dt)\n{\n  PARENT_CLASS::initializeDynamics(state, control, output, theta_s, t_0, dt);\n  if (this->SHARED_MEM_REQUEST_GRD_BYTES != 0)\n  {  // Allows us to turn on or off global or shared memory version of params\n    DYN_PARAMS_T* shared_params = (DYN_PARAMS_T*)theta_s;\n    *shared_params = this->params_;\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::computeParametricAccelDeriv(float* state, float* control,\n                                                                                         float* state_der,\n                                                                                         const float dt,\n                                                                                         DYN_PARAMS_T* params_p)\n{\n  const int tdy = threadIdx.y;\n  float linear_brake_slope = 0.2f;\n\n  
// Compute dynamics\n  bool enable_brake = control[C_INDEX(THROTTLE_BRAKE)] < 0.0f;\n  int index = (fabsf(state[S_INDEX(VEL_X)]) > linear_brake_slope && fabsf(state[S_INDEX(VEL_X)]) <= 3.0f) +\n              (fabsf(state[S_INDEX(VEL_X)]) > 3.0f) * 2;\n  const float brake_state = fminf(fmaxf(state[S_INDEX(BRAKE_STATE)], 0.0f), 0.25f);\n  // applying position throttle\n  float throttle = params_p->c_t[index] * control[C_INDEX(THROTTLE_BRAKE)];\n  float brake = params_p->c_b[index] * brake_state * (state[S_INDEX(VEL_X)] >= 0.0f ? -1.0f : 1.0f);\n  if (fabsf(state[S_INDEX(VEL_X)]) <= linear_brake_slope)\n  {\n    throttle = params_p->c_t[index] * fmaxf(control[C_INDEX(THROTTLE_BRAKE)] - params_p->low_min_throttle, 0.0f);\n    brake = params_p->c_b[index] * brake_state * -state[S_INDEX(VEL_X)];\n  }\n\n  if (tdy == 0)\n  {\n    state_der[S_INDEX(VEL_X)] = (!enable_brake) * throttle * params_p->gear_sign + brake -\n                                params_p->c_v[index] * state[S_INDEX(VEL_X)] + params_p->c_0;\n    state_der[S_INDEX(VEL_X)] = fminf(fmaxf(state_der[S_INDEX(VEL_X)], -params_p->clamp_ax), params_p->clamp_ax);\n    if (fabsf(state[S_INDEX(PITCH)]) < static_cast<float>(M_PI_2))\n    {\n      state_der[S_INDEX(VEL_X)] -= params_p->gravity * __sinf(angle_utils::normalizeAngle(state[S_INDEX(PITCH)]));\n    }\n  }\n  state_der[S_INDEX(YAW)] =\n      (state[S_INDEX(VEL_X)] / params_p->wheel_base) *\n      __tanf(angle_utils::normalizeAngle(state[S_INDEX(STEER_ANGLE)] / params_p->steer_angle_scale));\n  const float yaw_norm = angle_utils::normalizeAngle(state[S_INDEX(YAW)]);\n  state_der[S_INDEX(POS_X)] = state[S_INDEX(VEL_X)] * __cosf(yaw_norm);\n  state_der[S_INDEX(POS_Y)] = state[S_INDEX(VEL_X)] * __sinf(yaw_norm);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::updateState(float* state, float* next_state,\n                                                                         float* state_der, const 
float dt,\n                                                                         DYN_PARAMS_T* params_p)\n{\n  // const uint tdy = threadIdx.y;\n  int pi, step;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_Y>(pi, step);\n\n  // Set to 6 as the last 3 states do not do euler integration\n  for (int i = pi; i < 6; i += step)\n  {\n    // if (i == S_INDEX(ROLL) || i == S_INDEX(PITCH) || i == S_INDEX(STEER_ANGLE_RATE))\n    // {\n    //   continue;\n    // }\n    next_state[i] = state[i] + state_der[i] * dt;\n    switch (i)\n    {\n      case S_INDEX(YAW):\n        next_state[i] = angle_utils::normalizeAngle(next_state[i]);\n        break;\n      case S_INDEX(STEER_ANGLE):\n        next_state[S_INDEX(STEER_ANGLE)] =\n            fmaxf(fminf(next_state[S_INDEX(STEER_ANGLE)], params_p->max_steer_angle), -params_p->max_steer_angle);\n        next_state[S_INDEX(STEER_ANGLE_RATE)] = state_der[S_INDEX(STEER_ANGLE)];\n        break;\n      case S_INDEX(BRAKE_STATE):\n        next_state[S_INDEX(BRAKE_STATE)] = fminf(fmaxf(next_state[S_INDEX(BRAKE_STATE)], 0.0f), 1.0f);\n        break;\n    }\n  }\n  __syncthreads();\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ inline void RacerDubinsElevationImpl<CLASS_T, PARAMS_T>::step(float* state, float* next_state,\n                                                                         float* state_der, float* control,\n                                                                         float* output, float* theta_s, const float t,\n                                                                         const float dt)\n{\n  DYN_PARAMS_T* params_p;\n  SharedBlock* sb;\n  // TODO the below two if statements conflict in a bad way\n  if (this->SHARED_MEM_REQUEST_GRD_BYTES != 0)\n  {  // Allows us to turn on or off global or shared memory version of params\n    params_p = (DYN_PARAMS_T*)theta_s;\n  }\n  else\n  {\n    params_p = &(this->params_);\n  }\n  if (this->SHARED_MEM_REQUEST_BLK_BYTES != 0)\n  {\n    
float* sb_mem =\n        &theta_s[mppi::math::int_multiple_const(this->SHARED_MEM_REQUEST_GRD_BYTES, sizeof(float4)) / sizeof(float)];\n    sb = (SharedBlock*)&sb_mem[(this->SHARED_MEM_REQUEST_BLK_BYTES * threadIdx.x +\n                                this->SHARED_MEM_REQUEST_BLK_BYTES * blockDim.x * threadIdx.z) /\n                               sizeof(float)];\n  }\n  this->computeParametricDelayDeriv(state, control, state_der, params_p);\n  this->computeParametricSteerDeriv(state, control, state_der, params_p);\n  computeParametricAccelDeriv(state, control, state_der, dt, params_p);\n\n  updateState(state, next_state, state_der, dt, params_p);\n  computeUncertaintyPropagation(state, control, state_der, next_state, dt, params_p, sb, theta_s);\n\n  if (threadIdx.y == 0)\n  {\n    float roll = state[S_INDEX(ROLL)];\n    float pitch = state[S_INDEX(PITCH)];\n    RACER::computeStaticSettling<DYN_PARAMS_T::OutputIndex, TwoDTextureHelper<float>>(\n        this->tex_helper_, next_state[S_INDEX(YAW)], next_state[S_INDEX(POS_X)], next_state[S_INDEX(POS_Y)], roll,\n        pitch, output);\n    next_state[S_INDEX(PITCH)] = pitch;\n    next_state[S_INDEX(ROLL)] = roll;\n  }\n  __syncthreads();\n  this->setOutputs(state_der, next_state, output);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nRacerDubinsElevationImpl<CLASS_T, PARAMS_T>::state_array\nRacerDubinsElevationImpl<CLASS_T, PARAMS_T>::stateFromMap(const std::map<std::string, float>& map)\n{\n  std::vector<std::string> keys = { \"VEL_X\",      \"VEL_Y\", \"POS_X\",       \"POS_Y\",\n                                    \"ROLL\",       \"PITCH\", \"STEER_ANGLE\", \"STEER_ANGLE_RATE\",\n                                    \"BRAKE_STATE\" };\n  bool look_for_uncertainty = false;\n  if (look_for_uncertainty)\n  {\n    keys.push_back(\"UNCERTAINTY_POS_X\");\n    keys.push_back(\"UNCERTAINTY_POS_Y\");\n    keys.push_back(\"UNCERTAINTY_YAW\");\n    keys.push_back(\"UNCERTAINTY_VEL_X\");\n    
keys.push_back(\"UNCERTAINTY_POS_X_Y\");\n    keys.push_back(\"UNCERTAINTY_POS_X_YAW\");\n    keys.push_back(\"UNCERTAINTY_POS_X_VEL_X\");\n    keys.push_back(\"UNCERTAINTY_POS_Y_YAW\");\n    keys.push_back(\"UNCERTAINTY_POS_Y_VEL_X\");\n    keys.push_back(\"UNCERTAINTY_YAW_VEL_X\");\n  }\n\n  bool found_all_keys = true;\n  for (const auto& key : keys)\n  {\n    if (map.find(key) == map.end())\n    {\n      this->logger_->warning(\"Could not find %s key for elevation dynamics\\n\", key.c_str());\n      found_all_keys = false;\n    }\n  }\n\n  state_array s = this->getZeroState();\n  if (!found_all_keys)\n  {\n    return state_array::Constant(NAN);\n  }\n  s(S_INDEX(POS_X)) = map.at(\"POS_X\");\n  s(S_INDEX(POS_Y)) = map.at(\"POS_Y\");\n  s(S_INDEX(VEL_X)) = map.at(\"VEL_X\");\n  s(S_INDEX(YAW)) = map.at(\"YAW\");\n  s(S_INDEX(STEER_ANGLE)) = map.at(\"STEER_ANGLE\");\n  s(S_INDEX(STEER_ANGLE_RATE)) = map.at(\"STEER_ANGLE_RATE\");\n  s(S_INDEX(ROLL)) = map.at(\"ROLL\");\n  s(S_INDEX(PITCH)) = map.at(\"PITCH\");\n  s(S_INDEX(BRAKE_STATE)) = map.at(\"BRAKE_STATE\");\n  if (look_for_uncertainty)\n  {\n    s(S_INDEX(UNCERTAINTY_POS_X)) = map.at(\"UNCERTAINTY_POS_X\");\n    s(S_INDEX(UNCERTAINTY_POS_Y)) = map.at(\"UNCERTAINTY_POS_Y\");\n    s(S_INDEX(UNCERTAINTY_YAW)) = map.at(\"UNCERTAINTY_YAW\");\n    s(S_INDEX(UNCERTAINTY_VEL_X)) = map.at(\"UNCERTAINTY_VEL_X\");\n    s(S_INDEX(UNCERTAINTY_POS_X_Y)) = map.at(\"UNCERTAINTY_POS_X_Y\");\n    s(S_INDEX(UNCERTAINTY_POS_X_YAW)) = map.at(\"UNCERTAINTY_POS_X_YAW\");\n    s(S_INDEX(UNCERTAINTY_POS_X_VEL_X)) = map.at(\"UNCERTAINTY_POS_X_VEL_X\");\n    s(S_INDEX(UNCERTAINTY_POS_Y_YAW)) = map.at(\"UNCERTAINTY_POS_Y_YAW\");\n    s(S_INDEX(UNCERTAINTY_POS_Y_VEL_X)) = map.at(\"UNCERTAINTY_POS_Y_VEL_X\");\n    s(S_INDEX(UNCERTAINTY_YAW_VEL_X)) = map.at(\"UNCERTAINTY_YAW_VEL_X\");\n  }\n  float eps = 1e-6f;\n  if (s(S_INDEX(UNCERTAINTY_POS_X)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_POS_X)) = eps;\n  }\n  if 
(s(S_INDEX(UNCERTAINTY_POS_Y)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_POS_Y)) = eps;\n  }\n  if (s(S_INDEX(UNCERTAINTY_YAW)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_YAW)) = eps;\n  }\n  if (s(S_INDEX(UNCERTAINTY_VEL_X)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_VEL_X)) = eps;\n  }\n  return s;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nEigen::Quaternionf\nRacerDubinsElevationImpl<CLASS_T, PARAMS_T>::attitudeFromState(const Eigen::Ref<const state_array>& state)\n{\n  Eigen::Quaternionf q;\n  mppi::math::Euler2QuatNWU(state(S_INDEX(ROLL)), state(S_INDEX(PITCH)), state(S_INDEX(YAW)), q);\n  return q;\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\nEigen::Vector3f\nRacerDubinsElevationImpl<CLASS_T, PARAMS_T>::positionFromState(const Eigen::Ref<const state_array>& state)\n{\n  return Eigen::Vector3f(state[S_INDEX(POS_X)], state[S_INDEX(POS_Y)], 0.0f);\n}\n"
  },
  {
    "path": "include/mppi/dynamics/racer_dubins/racer_dubins_elevation.cuh",
    "content": "#ifndef MPPIGENERIC_RACER_DUBINS_ELEVATION_CUH\n#define MPPIGENERIC_RACER_DUBINS_ELEVATION_CUH\n\n#include <mppi/dynamics/dynamics.cuh>\n#include <mppi/dynamics/racer_dubins/racer_dubins.cuh>\n#include <mppi/utils/texture_helpers/two_d_texture_helper.cuh>\n#include <mppi/utils/angle_utils.cuh>\n\nusing namespace MPPI_internal;\n\n#ifndef U_INDEX\n#define U_IND_CLASS(CLASS, enum_val) E_INDEX(CLASS::UncertaintyIndex, enum_val)\n#define U_INDEX(enum_val) U_IND_CLASS(PARENT_CLASS::DYN_PARAMS_T, enum_val)\n#endif\n\nstruct RacerDubinsElevationParams : public RacerDubinsParams\n{\n  enum class StateIndex : int\n  {\n    VEL_X = 0,\n    YAW,\n    POS_X,\n    POS_Y,\n    STEER_ANGLE,\n    BRAKE_STATE,\n    ROLL,\n    PITCH,\n    STEER_ANGLE_RATE,\n    UNCERTAINTY_POS_X,\n    UNCERTAINTY_POS_Y,\n    UNCERTAINTY_YAW,\n    UNCERTAINTY_VEL_X,\n    UNCERTAINTY_POS_X_Y,\n    UNCERTAINTY_POS_X_YAW,\n    UNCERTAINTY_POS_X_VEL_X,\n    UNCERTAINTY_POS_Y_YAW,\n    UNCERTAINTY_POS_Y_VEL_X,\n    UNCERTAINTY_YAW_VEL_X,\n    NUM_STATES\n  };\n  enum class UncertaintyIndex : int\n  {\n    VEL_X = 0,\n    YAW,\n    POS_X,\n    POS_Y,\n    NUM_UNCERTAINTIES\n  };\n  float clamp_ax = 5.5f;\n  // Uncertainty feedback and Noise coefficients\n  float K_x = 1.0f;                                       // feedback for pos_x\n  float K_y = 1.0f;                                       // feedback for pos_y\n  float K_yaw = 1.0f;                                     // feedback for yaw\n  float K_vel_x = 1.0f;                                   // feedback for vel x\n  float Q_x_acc = 1.0f;                                   // Add noise to vel x based on accel_x\n  float Q_x_v[3] = { 41.74219, -0.8187027, -2.2131343 };  // Add noise to vel x based on vel x\n  float Q_y_f = 0.1f;                                     // Add noise to pos x and y based on side force\n  float Q_omega_v = 0.001f;                               // Add noise to yaw based on vel x\n  float Q_omega_steering = 0.0f;  
                        // Add noise to yaw based on steering\n};\n\ntemplate <class CLASS_T, class PARAMS_T>\nclass RacerDubinsElevationImpl : public RacerDubinsImpl<CLASS_T, PARAMS_T>\n{\npublic:\n  // static const int SHARED_MEM_REQUEST_GRD_BYTES = sizeof(DYN_PARAMS_T);\n  using PARENT_CLASS = RacerDubinsImpl<CLASS_T, PARAMS_T>;\n  using PARENT_CLASS::initializeDynamics;\n\n  typedef PARAMS_T DYN_PARAMS_T;\n  static const int UNCERTAINTY_DIM = U_IND_CLASS(PARAMS_T, NUM_UNCERTAINTIES);\n\n  struct __align__(16) SharedBlock\n  {\n    float A[UNCERTAINTY_DIM * UNCERTAINTY_DIM] MPPI_ALIGN(16) = { 0.0f };\n    float Sigma_a[UNCERTAINTY_DIM * UNCERTAINTY_DIM] MPPI_ALIGN(16) = { 0.0f };\n    float Sigma_b[UNCERTAINTY_DIM * UNCERTAINTY_DIM] MPPI_ALIGN(16) = { 0.0f };\n  };\n\n  typedef typename PARENT_CLASS::state_array state_array;\n  typedef typename PARENT_CLASS::control_array control_array;\n  typedef typename PARENT_CLASS::output_array output_array;\n  typedef typename PARENT_CLASS::dfdx dfdx;\n  typedef typename PARENT_CLASS::dfdu dfdu;\n\n  RacerDubinsElevationImpl(cudaStream_t stream = nullptr) : PARENT_CLASS(stream)\n  {\n    tex_helper_ = new TwoDTextureHelper<float>(1, stream);\n    this->SHARED_MEM_REQUEST_BLK_BYTES = sizeof(SharedBlock);\n  }\n  RacerDubinsElevationImpl(PARAMS_T& params, cudaStream_t stream = nullptr) : PARENT_CLASS(params, stream)\n  {\n    tex_helper_ = new TwoDTextureHelper<float>(1, stream);\n    this->SHARED_MEM_REQUEST_BLK_BYTES = sizeof(SharedBlock);\n  }\n\n  std::string getDynamicsModelName() const override\n  {\n    return \"RACER Dubins w/ Elevation Model\";\n  }\n\n  ~RacerDubinsElevationImpl()\n  {\n    freeCudaMem();\n    delete tex_helper_;\n  }\n\n  void updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                   Eigen::Ref<state_array> state_der, const float dt)\n  {\n    this->PARENT_CLASS::updateState(state, next_state, state_der, dt);\n  }\n\n  void 
updateRotation(std::array<float3, 3>& rotation)\n  {\n    this->tex_helper_->updateRotation(0, rotation);\n  }\n\n  void bindToStream(cudaStream_t stream)\n  {\n    this->tex_helper_->bindToStream(stream);\n    this->PARENT_CLASS::bindToStream(stream);\n  }\n\n  void GPUSetup();\n\n  void freeCudaMem();\n\n  void paramsToDevice();\n\n  void computeParametricAccelDeriv(const Eigen::Ref<const state_array>& state,\n                                   const Eigen::Ref<const control_array>& control, Eigen::Ref<state_array> state_der,\n                                   const float dt);\n\n  __host__ __device__ void setOutputs(const float* state_der, const float* next_state, float* output);\n\n  __device__ void computeParametricAccelDeriv(float* state, float* control, float* state_der, const float dt,\n                                              DYN_PARAMS_T* params_p);\n\n  bool computeGrad(const Eigen::Ref<const state_array>& state = state_array(),\n                   const Eigen::Ref<const control_array>& control = control_array(), Eigen::Ref<dfdx> A = dfdx(),\n                   Eigen::Ref<dfdu> B = dfdu());\n\n  __host__ __device__ void computeUncertaintyPropagation(const float* state, const float* control,\n                                                         const float* state_der, float* next_state, float dt,\n                                                         DYN_PARAMS_T* params_p, SharedBlock* uncertainty_data,\n                                                         float* theta_s);\n\n  __host__ __device__ void uncertaintyMatrixToOutput(const float* uncertainty_matrix, float* output);\n  __host__ __device__ void uncertaintyMatrixToState(const float* uncertainty_matrix, float* state);\n  __host__ __device__ void uncertaintyStateToMatrix(const float* state, float* uncertainty_matrix);\n  __host__ __device__ bool computeUncertaintyJacobian(const float* state, const float* control, float* A,\n                                                      
DYN_PARAMS_T* params_p);\n  __host__ __device__ bool computeQ(const float* state, const float* control, const float* state_der, float* Q,\n                                    DYN_PARAMS_T* params_p, float* theta_s);\n\n  __device__ void updateState(float* state, float* next_state, float* state_der, const float dt,\n                              DYN_PARAMS_T* params_p);\n\n  void computeStateDeriv(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                         Eigen::Ref<state_array> state_der)\n  {\n  }\n\n  state_array computeStateError(const Eigen::Ref<const state_array>& pred_state,\n                                const Eigen::Ref<const state_array>& true_state)\n  {\n    state_array state_diff = pred_state - true_state;\n    state_diff(S_INDEX(YAW)) = angle_utils::shortestAngularDistance(true_state(S_INDEX(YAW)), pred_state(S_INDEX(YAW)));\n    state_diff(S_INDEX(ROLL)) =\n        angle_utils::shortestAngularDistance(true_state(S_INDEX(ROLL)), pred_state(S_INDEX(ROLL)));\n    state_diff(S_INDEX(PITCH)) =\n        angle_utils::shortestAngularDistance(true_state(S_INDEX(PITCH)), pred_state(S_INDEX(PITCH)));\n    return state_diff;\n  }\n\n  void step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state, Eigen::Ref<state_array> state_der,\n            const Eigen::Ref<const control_array>& control, Eigen::Ref<output_array> output, const float t,\n            const float dt);\n\n  __device__ inline void step(float* state, float* next_state, float* state_der, float* control, float* output,\n                              float* theta_s, const float t, const float dt);\n\n  __device__ void initializeDynamics(float* state, float* control, float* output, float* theta_s, float t_0, float dt);\n\n  Eigen::Quaternionf attitudeFromState(const Eigen::Ref<const state_array>& state);\n  Eigen::Vector3f positionFromState(const Eigen::Ref<const state_array>& state);\n\n  state_array stateFromMap(const 
std::map<std::string, float>& map) override;\n\n  TwoDTextureHelper<float>* getTextureHelper()\n  {\n    return tex_helper_;\n  }\n\nprotected:\n  TwoDTextureHelper<float>* tex_helper_ = nullptr;\n};\n\nclass RacerDubinsElevation : public RacerDubinsElevationImpl<RacerDubinsElevation, RacerDubinsElevationParams>\n{\npublic:\n  RacerDubinsElevation(cudaStream_t stream = nullptr)\n    : RacerDubinsElevationImpl<RacerDubinsElevation, RacerDubinsElevationParams>(stream)\n  {\n  }\n  RacerDubinsElevation(RacerDubinsElevationParams& params, cudaStream_t stream = nullptr)\n    : RacerDubinsElevationImpl<RacerDubinsElevation, RacerDubinsElevationParams>(params, stream)\n  {\n  }\n};\n\n#if __CUDACC__\n#include \"racer_dubins_elevation.cu\"\n#endif\n\n#endif  // MPPIGENERIC_RACER_DUBINS_ELEVATION_CUH\n"
  },
  {
    "path": "include/mppi/dynamics/racer_dubins/racer_dubins_elevation_lstm_steering.cu",
    "content": "//\n// Created by jason on 8/31/22.\n//\n\n#include \"racer_dubins_elevation_lstm_steering.cuh\"\n\n#define TEMPLATE_TYPE template <class CLASS_T, class PARAMS_T>\n#define TEMPLATE_NAME RacerDubinsElevationLSTMSteeringImpl<CLASS_T, PARAMS_T>\n\nTEMPLATE_TYPE\nTEMPLATE_NAME::RacerDubinsElevationLSTMSteeringImpl(int init_input_dim, int init_hidden_dim,\n                                                    std::vector<int>& init_output_layers, int input_dim, int hidden_dim,\n                                                    std::vector<int>& output_layers, int init_len, cudaStream_t stream)\n  : PARENT_CLASS(stream)\n{\n  this->requires_buffer_ = true;\n  lstm_lstm_helper_ = std::make_shared<LSTMLSTMHelper<>>(init_input_dim, init_hidden_dim, init_output_layers, input_dim,\n                                                         hidden_dim, output_layers, init_len, stream);\n  this->SHARED_MEM_REQUEST_GRD_BYTES = lstm_lstm_helper_->getLSTMModel()->getGrdSharedSizeBytes();\n  this->SHARED_MEM_REQUEST_BLK_BYTES = sizeof(SharedBlock) + lstm_lstm_helper_->getLSTMModel()->getBlkSharedSizeBytes();\n}\n\nTEMPLATE_TYPE\nTEMPLATE_NAME::RacerDubinsElevationLSTMSteeringImpl(std::string path, cudaStream_t stream) : PARENT_CLASS(stream)\n{\n  if (!fileExists(path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << path.c_str();\n    exit(-1);\n  }\n  cnpy::npz_t param_dict = cnpy::npz_load(path);\n  this->params_.max_steer_rate = param_dict.at(\"parameters/max_rate_pos\").data<float>()[0];\n  this->params_.steering_constant = param_dict.at(\"parameters/constant\").data<float>()[0];\n  this->params_.steer_accel_constant = param_dict.at(\"parameters/accel_constant\").data<float>()[0];\n  this->params_.steer_accel_drag_constant = param_dict.at(\"parameters/accel_drag_constant\").data<float>()[0];\n  lstm_lstm_helper_ = std::make_shared<LSTMLSTMHelper<>>(path, stream);\n\n  this->requires_buffer_ = true;\n  this->SHARED_MEM_REQUEST_GRD_BYTES = 
lstm_lstm_helper_->getLSTMModel()->getGrdSharedSizeBytes();\n  this->SHARED_MEM_REQUEST_BLK_BYTES = sizeof(SharedBlock) + lstm_lstm_helper_->getLSTMModel()->getBlkSharedSizeBytes();\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::GPUSetup()\n{\n  lstm_lstm_helper_->GPUSetup();\n  // makes sure that the device ptr sees the correct lstm model\n  this->network_d_ = lstm_lstm_helper_->getLSTMDevicePtr();\n  PARENT_CLASS::GPUSetup();\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::bindToStream(cudaStream_t stream)\n{\n  PARENT_CLASS::bindToStream(stream);\n  lstm_lstm_helper_->getLSTMModel()->bindToStream(stream);\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::freeCudaMem()\n{\n  PARENT_CLASS::freeCudaMem();\n  lstm_lstm_helper_->freeCudaMem();\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::computeLSTMSteering(Eigen::Ref<state_array> state, const Eigen::Ref<const control_array>& control,\n                                        Eigen::Ref<state_array> state_der)\n{\n  const float parametric_accel =\n      (control(C_INDEX(STEER_CMD)) * this->params_.steer_command_angle_scale - state(S_INDEX(STEER_ANGLE))) *\n      this->params_.steering_constant;\n  state_der(S_INDEX(STEER_ANGLE_RATE)) =\n      fmaxf(fminf((parametric_accel - state(S_INDEX(STEER_ANGLE_RATE))) * this->params_.steer_accel_constant -\n                      state(S_INDEX(STEER_ANGLE_RATE)) * this->params_.steer_accel_drag_constant,\n                  this->params_.max_steer_rate),\n            -this->params_.max_steer_rate);\n\n  Eigen::VectorXf input = lstm_lstm_helper_->getLSTMModel()->getZeroInputVector();\n  input(0) = state(S_INDEX(STEER_ANGLE)) * 0.2f;\n  input(1) = state(S_INDEX(STEER_ANGLE_RATE)) * 0.2f;\n  input(2) = control(C_INDEX(STEER_CMD));\n  input(3) = state_der(S_INDEX(STEER_ANGLE_RATE)) * 0.2f;  // this is the parametric part as input\n  Eigen::VectorXf nn_output = lstm_lstm_helper_->getLSTMModel()->getZeroOutputVector();\n  lstm_lstm_helper_->forward(input, nn_output);\n  state_der(S_INDEX(STEER_ANGLE_RATE)) += 
nn_output(0) * 5.0f;\n  state_der(S_INDEX(STEER_ANGLE)) = state(S_INDEX(STEER_ANGLE_RATE));\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state,\n                         Eigen::Ref<state_array> state_der, const Eigen::Ref<const control_array>& control,\n                         Eigen::Ref<output_array> output, const float t, const float dt)\n{\n  this->computeParametricDelayDeriv(state, control, state_der);\n  this->computeParametricAccelDeriv(state, control, state_der, dt);\n  this->computeLSTMSteering(state, control, state_der);\n\n  // Integrate using racer_dubins updateState\n  updateState(state, next_state, state_der, dt);\n  SharedBlock sb;\n  computeUncertaintyPropagation(state.data(), control.data(), state_der.data(), next_state.data(), dt, &this->params_,\n                                &sb, nullptr);\n\n  float roll = state(S_INDEX(ROLL));\n  float pitch = state(S_INDEX(PITCH));\n  RACER::computeStaticSettling<typename DYN_PARAMS_T::OutputIndex, TwoDTextureHelper<float>>(\n      this->tex_helper_, next_state(S_INDEX(YAW)), next_state(S_INDEX(POS_X)), next_state(S_INDEX(POS_Y)), roll, pitch,\n      output.data());\n  next_state[S_INDEX(PITCH)] = pitch;\n  next_state[S_INDEX(ROLL)] = roll;\n\n  setOutputs(state_der.data(), next_state.data(), output.data());\n}\n\nTEMPLATE_TYPE\n__device__ void TEMPLATE_NAME::initializeDynamics(float* state, float* control, float* output, float* theta_s,\n                                                  float t_0, float dt)\n{\n  // const int shift = PARENT_CLASS::SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float);\n  // if (PARENT_CLASS::SHARED_MEM_REQUEST_GRD_BYTES != 0)\n  // {  // Allows us to turn on or off global or shared memory version of params\n  //   DYN_PARAMS_T* shared_params = (DYN_PARAMS_T*)theta_s;\n  //   *shared_params = this->params_;\n  // }\n  network_d_->initialize(theta_s, this->SHARED_MEM_REQUEST_BLK_BYTES, this->SHARED_MEM_REQUEST_GRD_BYTES,\n  
                       sizeof(SharedBlock) / sizeof(float));\n  setOutputs(state, state, output);\n}\n\ntemplate <class CLASS_T, class PARAMS_T>\n__device__ void RacerDubinsElevationLSTMSteeringImpl<CLASS_T, PARAMS_T>::computeLSTMSteering(\n    float* state, float* control, float* state_der, DYN_PARAMS_T* params_p, float* theta_s, const int grd_shift,\n    const int blk_shift, const int sb_shift)\n{\n  const uint tdy = threadIdx.y;\n\n  // loads in the input to the network\n  float* input_loc = network_d_->getInputLocation(theta_s, grd_shift, blk_shift, sb_shift);\n  if (tdy == 0)\n  {\n    const float parametric_accel =\n        (control[C_INDEX(STEER_CMD)] * params_p->steer_command_angle_scale - state[S_INDEX(STEER_ANGLE)]) *\n        params_p->steering_constant;\n    state_der[S_INDEX(STEER_ANGLE_RATE)] =\n        fmaxf(fminf((parametric_accel - state[S_INDEX(STEER_ANGLE_RATE)]) * params_p->steer_accel_constant -\n                        state[S_INDEX(STEER_ANGLE_RATE)] * params_p->steer_accel_drag_constant,\n                    params_p->max_steer_rate),\n              -params_p->max_steer_rate);\n\n    input_loc[0] = state[S_INDEX(STEER_ANGLE)] * 0.2f;\n    input_loc[1] = state[S_INDEX(STEER_ANGLE_RATE)] * 0.2f;\n    input_loc[2] = control[C_INDEX(STEER_CMD)];\n    input_loc[3] = state_der[S_INDEX(STEER_ANGLE_RATE)] * 0.2f;  // this is the parametric part as input\n  }\n  __syncthreads();\n  // runs the network\n  float* cur_hidden_cell = network_d_->getHiddenCellLocation(theta_s, grd_shift, blk_shift, sb_shift);\n  float* nn_output = network_d_->forward(nullptr, theta_s, cur_hidden_cell);\n  // copies the results of the network to state derivative\n  if (tdy == 0)\n  {\n    state_der[S_INDEX(STEER_ANGLE_RATE)] += nn_output[0] * 5.0f;\n    state_der[S_INDEX(STEER_ANGLE)] = state[S_INDEX(STEER_ANGLE_RATE)];\n  }\n  __syncthreads();\n}\n\nTEMPLATE_TYPE\n__device__ inline void TEMPLATE_NAME::step(float* state, float* next_state, float* state_der, float* 
control,\n                                           float* output, float* theta_s, const float t, const float dt)\n{\n  DYN_PARAMS_T* params_p;\n  SharedBlock* sb;\n  // TODO below conficts in a bad way\n  // if (PARENT_CLASS::SHARED_MEM_REQUEST_GRD_BYTES != 0)\n  // {  // Allows us to turn on or off global or shared memory version of params\n  //   params_p = (DYN_PARAMS_T*)theta_s;\n  // }\n  // else\n  // {\n  //   params_p = &(this->params_);\n  // }\n  params_p = &(this->params_);\n  const int grd_shift = this->SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float);  // grid size to shift by\n  const int blk_shift = this->SHARED_MEM_REQUEST_BLK_BYTES * (threadIdx.x + blockDim.x * threadIdx.z) /\n                        sizeof(float);                       // blk size to shift by\n  const int sb_shift = sizeof(SharedBlock) / sizeof(float);  // how much to shift inside a block to lstm values\n  if (this->SHARED_MEM_REQUEST_BLK_BYTES != 0)\n  {\n    float* sb_mem = &theta_s[grd_shift];  // does the grid shift\n    sb = (SharedBlock*)(sb_mem + blk_shift);\n  }\n  computeParametricDelayDeriv(state, control, state_der, params_p);\n  computeParametricAccelDeriv(state, control, state_der, dt, params_p);\n\n  computeLSTMSteering(state, control, state_der, params_p, theta_s, grd_shift, blk_shift, sb_shift);\n\n  updateState(state, next_state, state_der, dt);\n  computeUncertaintyPropagation(state, control, state_der, next_state, dt, params_p, sb, theta_s);\n  if (threadIdx.y == 0)\n  {\n    float roll = state[S_INDEX(ROLL)];\n    float pitch = state[S_INDEX(PITCH)];\n    RACER::computeStaticSettling<DYN_PARAMS_T::OutputIndex, TwoDTextureHelper<float>>(\n        this->tex_helper_, next_state[S_INDEX(YAW)], next_state[S_INDEX(POS_X)], next_state[S_INDEX(POS_Y)], roll,\n        pitch, output);\n    next_state[S_INDEX(PITCH)] = pitch;\n    next_state[S_INDEX(ROLL)] = roll;\n  }\n  __syncthreads();\n  setOutputs(state_der, next_state, output);\n}\n\nTEMPLATE_TYPE\nbool 
TEMPLATE_NAME::updateFromBuffer(const buffer_trajectory& buffer)\n{\n  std::vector<std::string> keys = { \"STEER_ANGLE\", \"STEER_ANGLE_RATE\", \"CAN_STEER_CMD\" };\n\n  bool missing_key = this->checkIfKeysInBuffer(buffer, keys);\n  if (missing_key)\n  {\n    return false;\n  }\n  Eigen::MatrixXf init_buffer = lstm_lstm_helper_->getEmptyBufferMatrix(buffer.at(\"STEER_ANGLE\").rows());\n\n  init_buffer.row(0) = buffer.at(\"STEER_ANGLE\") * 0.2f;\n  init_buffer.row(1) = buffer.at(\"STEER_ANGLE_RATE\") * 0.2f;\n  init_buffer.row(2) = buffer.at(\"CAN_STEER_CMD\");\n\n  lstm_lstm_helper_->initializeLSTM(init_buffer);\n  return true;\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::initializeDynamics(const Eigen::Ref<const state_array>& state,\n                                       const Eigen::Ref<const control_array>& control, Eigen::Ref<output_array> output,\n                                       float t_0, float dt)\n{\n  this->lstm_lstm_helper_->resetLSTMHiddenCellCPU();\n  PARENT_CLASS::initializeDynamics(state, control, output, t_0, dt);\n}\n\nTEMPLATE_TYPE\n__device__ void TEMPLATE_NAME::updateState(float* state, float* next_state, float* state_der, const float dt)\n{\n  int i;\n  int tdy = threadIdx.y;\n  // Add the state derivative time dt to the current state.\n  for (i = tdy; i < 6; i += blockDim.y)\n  {\n    next_state[i] = state[i] + state_der[i] * dt;\n    if (i == S_INDEX(YAW))\n    {\n      next_state[i] = angle_utils::normalizeAngle(next_state[i]);\n    }\n    if (i == S_INDEX(STEER_ANGLE))\n    {\n      next_state[i] = fmaxf(fminf(next_state[i], this->params_.max_steer_angle), -this->params_.max_steer_angle);\n      next_state[S_INDEX(STEER_ANGLE_RATE)] =\n          state[S_INDEX(STEER_ANGLE_RATE)] + state_der[S_INDEX(STEER_ANGLE_RATE)] * dt;\n    }\n    if (i == S_INDEX(BRAKE_STATE))\n    {\n      next_state[i] = fminf(fmaxf(next_state[i], 0.0f), 1.0f);\n    }\n  }\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::updateState(const Eigen::Ref<const state_array> 
state, Eigen::Ref<state_array> next_state,\n                                Eigen::Ref<state_array> state_der, const float dt)\n{\n  // Segmented it to ensure that roll and pitch don't get overwritten\n  for (int i = 0; i < 6; i++)\n  {\n    next_state[i] = state[i] + state_der[i] * dt;\n  }\n  next_state(S_INDEX(YAW)) = angle_utils::normalizeAngle(next_state(S_INDEX(YAW)));\n  next_state(S_INDEX(STEER_ANGLE)) =\n      fmaxf(fminf(next_state(S_INDEX(STEER_ANGLE)), this->params_.max_steer_angle), -this->params_.max_steer_angle);\n  next_state(S_INDEX(STEER_ANGLE_RATE)) = state(S_INDEX(STEER_ANGLE_RATE)) + state_der(S_INDEX(STEER_ANGLE_RATE)) * dt;\n  next_state(S_INDEX(BRAKE_STATE)) =\n      fminf(fmaxf(next_state(S_INDEX(BRAKE_STATE)), 0.0f), -this->control_rngs_[C_INDEX(THROTTLE_BRAKE)].x);\n}\n\n#undef TEMPLATE_NAME\n#undef TEMPLATE_TYPE\n"
  },
  {
    "path": "include/mppi/dynamics/racer_dubins/racer_dubins_elevation_lstm_steering.cuh",
    "content": "//\n// Created by jason on 8/31/22.\n//\n\n#include \"racer_dubins_elevation.cuh\"\n#include <mppi/utils/nn_helpers/lstm_lstm_helper.cuh>\n\n#ifndef MPPIGENERIC_RACER_DUBINS_ELEVATION_LSTM_STEERING_CUH\n#define MPPIGENERIC_RACER_DUBINS_ELEVATION_LSTM_STEERING_CUH\n\ntemplate <class CLASS_T, class PARAMS_T = RacerDubinsElevationParams>\nclass RacerDubinsElevationLSTMSteeringImpl : public RacerDubinsElevationImpl<CLASS_T, PARAMS_T>\n{\n  static_assert(std::is_base_of<RacerDubinsElevationParams, PARAMS_T>::value,\n                \"Params don't inherit from RacerDubinsElevationParams.\");\n\npublic:\n  using PARENT_CLASS = RacerDubinsElevationImpl<CLASS_T, PARAMS_T>;\n  using DYN_PARAMS_T = typename PARENT_CLASS::DYN_PARAMS_T;\n  using SharedBlock = typename PARENT_CLASS::SharedBlock;\n  using state_array = typename PARENT_CLASS::state_array;\n  using control_array = typename PARENT_CLASS::control_array;\n  using output_array = typename PARENT_CLASS::output_array;\n  using buffer_trajectory = typename PARENT_CLASS::buffer_trajectory;\n  using PARENT_CLASS::computeParametricAccelDeriv;\n  using PARENT_CLASS::computeParametricDelayDeriv;\n  using PARENT_CLASS::computeUncertaintyPropagation;\n  using PARENT_CLASS::setOutputs;\n\n  RacerDubinsElevationLSTMSteeringImpl(LSTMLSTMConfig config, cudaStream_t stream = nullptr)\n    : RacerDubinsElevationLSTMSteeringImpl(config.init_config.input_dim, config.init_config.hidden_dim,\n                                           config.init_config.output_layers, config.pred_config.input_dim,\n                                           config.pred_config.hidden_dim, config.pred_config.output_layers,\n                                           config.init_len, stream)\n  {\n  }\n\n  RacerDubinsElevationLSTMSteeringImpl(int init_input_dim, int init_hidden_dim, std::vector<int>& init_output_layers,\n                                       int input_dim, int hidden_dim, std::vector<int>& output_layers, int init_len,\n       
                                cudaStream_t stream = nullptr);\n  RacerDubinsElevationLSTMSteeringImpl(std::string path, cudaStream_t stream = nullptr);\n\n  std::string getDynamicsModelName() const override\n  {\n    return \"RACER Dubins LSTM Steering Model\";\n  }\n\n  void bindToStream(cudaStream_t stream);\n\n  void GPUSetup();\n\n  void freeCudaMem();\n\n  bool updateFromBuffer(const buffer_trajectory& buffer);\n\n  void computeLSTMSteering(Eigen::Ref<state_array> state, const Eigen::Ref<const control_array>& control,\n                           Eigen::Ref<state_array> state_der);\n\n  void step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state, Eigen::Ref<state_array> state_der,\n            const Eigen::Ref<const control_array>& control, Eigen::Ref<output_array> output, const float t,\n            const float dt);\n\n  void initializeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                          Eigen::Ref<output_array> output, float t_0, float dt);\n\n  __device__ inline void step(float* state, float* next_state, float* state_der, float* control, float* output,\n                              float* theta_s, const float t, const float dt);\n\n  __device__ void initializeDynamics(float* state, float* control, float* output, float* theta_s, float t_0, float dt);\n\n  __device__ void updateState(float* state, float* next_state, float* state_der, const float dt);\n\n  __device__ void computeLSTMSteering(float* state, float* control, float* state_der, DYN_PARAMS_T* params_p,\n                                      float* theta_s, const int grd_shift, const int blk_shift, const int sb_shift);\n\n  void updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                   Eigen::Ref<state_array> state_der, const float dt);\n\n  std::shared_ptr<LSTMLSTMHelper<>> getHelper()\n  {\n    return lstm_lstm_helper_;\n  }\n\n  LSTMHelper<>* network_d_ = 
nullptr;\n\nprotected:\n  std::shared_ptr<LSTMLSTMHelper<>> lstm_lstm_helper_;\n\n  RacerDubinsElevationLSTMSteeringImpl(cudaStream_t stream = 0)\n  {\n  }\n};\n\nclass RacerDubinsElevationLSTMSteering : public RacerDubinsElevationLSTMSteeringImpl<RacerDubinsElevationLSTMSteering>\n{\npublic:\n  using PARENT_CLASS = RacerDubinsElevationLSTMSteeringImpl<RacerDubinsElevationLSTMSteering>;\n  RacerDubinsElevationLSTMSteering(int init_input_dim, int init_hidden_dim, std::vector<int>& init_output_layers,\n                                   int input_dim, int hidden_dim, std::vector<int>& output_layers, int init_len,\n                                   cudaStream_t stream = nullptr)\n    : PARENT_CLASS(init_input_dim, init_hidden_dim, init_output_layers, input_dim, hidden_dim, output_layers, init_len,\n                   stream)\n  {\n  }\n  RacerDubinsElevationLSTMSteering(std::string path, cudaStream_t stream = nullptr) : PARENT_CLASS(path, stream)\n  {\n  }\n\nprotected:\n  RacerDubinsElevationLSTMSteering(cudaStream_t stream = nullptr) : PARENT_CLASS(stream)\n  {\n  }\n};\n\n#if __CUDACC__\n#include \"racer_dubins_elevation_lstm_steering.cu\"\n#endif\n\n#endif  // MPPIGENERIC_RACER_DUBINS_ELEVATION_LSTM_STEERING_CUH\n"
  },
  {
    "path": "include/mppi/dynamics/racer_dubins/racer_dubins_elevation_lstm_unc.cu",
    "content": "#include \"racer_dubins_elevation_lstm_unc.cuh\"\n\n// TODO fix the clamping of the brake value\n// TODO look into replacing roll/pitch values with static settled values\n\nRacerDubinsElevationLSTMUncertainty::RacerDubinsElevationLSTMUncertainty(LSTMLSTMConfig& steer_config,\n                                                                         LSTMLSTMConfig& mean_config,\n                                                                         LSTMLSTMConfig& unc_config,\n                                                                         cudaStream_t stream)\n  : PARENT_CLASS(steer_config, stream)\n{\n  this->mean_helper_ = std::make_shared<LSTMLSTMHelper<>>(mean_config, stream);\n  this->uncertainty_helper_ = std::make_shared<LSTMLSTMHelper<>>(unc_config, stream);\n\n  this->SHARED_MEM_REQUEST_GRD_BYTES = lstm_lstm_helper_->getLSTMModel()->getGrdSharedSizeBytes() +\n                                       mean_helper_->getLSTMModel()->getGrdSharedSizeBytes() +\n                                       uncertainty_helper_->getLSTMModel()->getGrdSharedSizeBytes();\n  this->SHARED_MEM_REQUEST_BLK_BYTES = sizeof(SharedBlock) +\n                                       lstm_lstm_helper_->getLSTMModel()->getBlkSharedSizeBytes() +\n                                       mean_helper_->getLSTMModel()->getBlkSharedSizeBytes() +\n                                       uncertainty_helper_->getLSTMModel()->getBlkSharedSizeBytes();\n  this->requires_buffer_ = true;\n}\n\nRacerDubinsElevationLSTMUncertainty::RacerDubinsElevationLSTMUncertainty(std::string path, cudaStream_t stream)\n  : PARENT_CLASS(stream)\n{\n  if (!fileExists(path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << path.c_str();\n    exit(-1);\n  }\n\n  lstm_lstm_helper_ = std::make_shared<LSTMLSTMHelper<>>(path, \"steering/model/\", stream);\n  uncertainty_helper_ = std::make_shared<LSTMLSTMHelper<>>(path, \"terra/uncertainty_network/\", stream);\n  mean_helper_ = 
std::make_shared<LSTMLSTMHelper<>>(path, \"terra/mean_network/\", stream);\n\n  // empty path means that the parent class will not load things\n  cnpy::npz_t param_dict = cnpy::npz_load(path);\n  // loads the steering constants\n  this->params_.max_steer_rate = param_dict.at(\"steering/parameters/max_rate_pos\").data<double>()[0];\n  this->params_.steering_constant = param_dict.at(\"steering/parameters/constant\").data<double>()[0];\n  this->params_.steer_accel_constant = param_dict.at(\"steering/parameters/accel_constant\").data<double>()[0];\n  this->params_.steer_accel_drag_constant = param_dict.at(\"steering/parameters/accel_drag_constant\").data<double>()[0];\n\n  this->params_.max_brake_rate_pos = param_dict.at(\"brake/parameters/max_rate_pos\").data<double>()[0];\n  this->params_.max_brake_rate_neg = param_dict.at(\"brake/parameters/max_rate_neg\").data<double>()[0];\n  for (int i = 0; i < 3; i++)\n  {\n    this->params_.c_t[i] = param_dict.at(\"terra/parameters/c_t\").data<double>()[i];\n    this->params_.c_b[i] = param_dict.at(\"terra/parameters/c_b\").data<double>()[i];\n    this->params_.c_v[i] = param_dict.at(\"terra/parameters/c_v\").data<double>()[i];\n\n    this->params_.pos_quad_brake_c[i] = param_dict.at(\"brake/parameters/constant\").data<double>()[i];\n    this->params_.neg_quad_brake_c[i] = param_dict.at(\"brake/parameters/negative_constant\").data<double>()[i];\n  }\n  this->params_.clamp_ax = 1000.0f;\n  this->params_.wheel_base = 3.0f;\n  this->params_.c_0 = 0.0f;\n  this->params_.max_steer_angle = 5.0f;\n  this->params_.low_min_throttle = 0.0f;\n  this->params_.gravity = param_dict.at(\"terra/parameters/gravity\").data<double>()[0];\n  this->params_.steer_angle_scale = param_dict.at(\"terra/parameters/steer_angle_scale\").data<double>()[0];\n  for (int i = 0; i < uncertainty_helper_->getLSTMModel()->getOutputDim(); i++)\n  {\n    this->params_.unc_scale[i] = param_dict.at(\"terra/parameters/uncertainty_scale\").data<double>()[i];\n  }\n\n  
this->SHARED_MEM_REQUEST_GRD_BYTES = lstm_lstm_helper_->getLSTMModel()->getGrdSharedSizeBytes() +\n                                       mean_helper_->getLSTMModel()->getGrdSharedSizeBytes() +\n                                       uncertainty_helper_->getLSTMModel()->getGrdSharedSizeBytes();\n  this->SHARED_MEM_REQUEST_BLK_BYTES = sizeof(SharedBlock) +\n                                       lstm_lstm_helper_->getLSTMModel()->getBlkSharedSizeBytes() +\n                                       mean_helper_->getLSTMModel()->getBlkSharedSizeBytes() +\n                                       uncertainty_helper_->getLSTMModel()->getBlkSharedSizeBytes();\n  this->requires_buffer_ = true;\n}\n\nvoid RacerDubinsElevationLSTMUncertainty::GPUSetup()\n{\n  uncertainty_helper_->GPUSetup();\n  this->uncertainty_d_ = uncertainty_helper_->getLSTMDevicePtr();\n  mean_helper_->GPUSetup();\n  this->mean_d_ = mean_helper_->getLSTMDevicePtr();\n  PARENT_CLASS::GPUSetup();\n}\n\nvoid RacerDubinsElevationLSTMUncertainty::bindToStream(cudaStream_t stream)\n{\n  PARENT_CLASS::bindToStream(stream);\n  uncertainty_helper_->getLSTMModel()->bindToStream(stream);\n  mean_helper_->getLSTMModel()->bindToStream(stream);\n}\n\nvoid RacerDubinsElevationLSTMUncertainty::freeCudaMem()\n{\n  uncertainty_helper_->freeCudaMem();\n  mean_helper_->freeCudaMem();\n  PARENT_CLASS::freeCudaMem();\n}\n\nbool RacerDubinsElevationLSTMUncertainty::updateFromBuffer(const buffer_trajectory& buffer)\n{\n  bool parent_success = PARENT_CLASS::updateFromBuffer(buffer);\n\n  std::vector<std::string> keys = { \"STEER_ANGLE\",   \"STEER_ANGLE_RATE\", \"CAN_STEER_CMD\", \"ROLL\",\n                                    \"PITCH\",         \"CAN_THROTTLE_CMD\", \"VEL_X\",         \"BRAKE_STATE\",\n                                    \"CAN_BRAKE_CMD\", \"OMEGA_Z\" };\n\n  bool missing_key = this->checkIfKeysInBuffer(buffer, keys);\n  if (missing_key || !parent_success)\n  {\n    return false;\n  }\n\n  Eigen::MatrixXf 
mean_init_buffer = mean_helper_->getEmptyBufferMatrix(buffer.at(\"VEL_X\").rows());\n  mean_init_buffer.row(0) = buffer.at(\"VEL_X\");\n  mean_init_buffer.row(1) = buffer.at(\"OMEGA_Z\");\n  mean_init_buffer.row(2) = buffer.at(\"BRAKE_STATE\");\n  mean_init_buffer.row(3) = buffer.at(\"STEER_ANGLE\");\n  mean_init_buffer.row(4) = buffer.at(\"STEER_ANGLE_RATE\");\n  mean_init_buffer.row(5) = buffer.at(\"CAN_THROTTLE_CMD\");\n  mean_init_buffer.row(6) = buffer.at(\"CAN_BRAKE_CMD\");\n  mean_init_buffer.row(7) = buffer.at(\"CAN_STEER_CMD\");\n  mean_init_buffer.row(8) = buffer.at(\"PITCH\").unaryExpr(&sinf);\n  // TODO option here if problem\n  // mean_init_buffer.row(9) = buffer.at(\"ROLL\").unaryExpr(&sinf);\n  mean_helper_->initializeLSTM(mean_init_buffer);\n\n  Eigen::MatrixXf unc_init_buffer = uncertainty_helper_->getEmptyBufferMatrix(buffer.at(\"VEL_X\").rows());\n  unc_init_buffer.row(0) = buffer.at(\"VEL_X\");\n  unc_init_buffer.row(1) = buffer.at(\"OMEGA_Z\");\n  unc_init_buffer.row(2) = buffer.at(\"BRAKE_STATE\");\n  unc_init_buffer.row(3) = buffer.at(\"STEER_ANGLE\");\n  unc_init_buffer.row(4) = buffer.at(\"STEER_ANGLE_RATE\");\n  unc_init_buffer.row(5) = buffer.at(\"CAN_THROTTLE_CMD\");\n  unc_init_buffer.row(6) = buffer.at(\"CAN_BRAKE_CMD\");\n  unc_init_buffer.row(7) = buffer.at(\"CAN_STEER_CMD\");\n  unc_init_buffer.row(8) = buffer.at(\"PITCH\").unaryExpr(&sinf);\n  unc_init_buffer.row(9) = buffer.at(\"ROLL\").unaryExpr(&sinf);\n  uncertainty_helper_->initializeLSTM(unc_init_buffer);\n  return true;\n}\n\nRacerDubinsElevationLSTMUncertainty::state_array\nRacerDubinsElevationLSTMUncertainty::stateFromMap(const std::map<std::string, float>& map)\n{\n  std::vector<std::string> needed_keys = { \"VEL_X\",       \"VEL_Z\",  \"POS_X\", \"POS_Y\", \"POS_Z\",       \"OMEGA_X\",\n                                           \"OMEGA_Y\",     \"ROLL\",   \"PITCH\", \"YAW\",   \"STEER_ANGLE\", \"STEER_ANGLE_RATE\",\n                                           
\"BRAKE_STATE\", \"OMEGA_Z\" };\n  std::vector<std::string> uncertainty_keys = { \"UNCERTAINTY_POS_X\",       \"UNCERTAINTY_POS_Y\",\n                                                \"UNCERTAINTY_YAW\",         \"UNCERTAINTY_VEL_X\",\n                                                \"UNCERTAINTY_POS_X_Y\",     \"UNCERTAINTY_POS_X_YAW\",\n                                                \"UNCERTAINTY_POS_X_VEL_X\", \"UNCERTAINTY_POS_Y_YAW\",\n                                                \"UNCERTAINTY_POS_Y_VEL_X\", \"UNCERTAINTY_YAW_VEL_X\" };\n  const bool use_uncertainty = false;\n  if (use_uncertainty)\n  {\n    needed_keys.insert(needed_keys.end(), uncertainty_keys.begin(), uncertainty_keys.end());\n  }\n\n  bool missing_key = false;\n  state_array s = state_array::Zero();\n  for (const auto& key : needed_keys)\n  {\n    if (map.find(key) == map.end())\n    {  // Print out all missing keys\n      std::cout << \"Could not find key \" << key << \" for elevation with simple suspension dynamics.\" << std::endl;\n      missing_key = true;\n    }\n  }\n  if (missing_key)\n  {\n    return state_array::Constant(NAN);\n  }\n\n  s(S_INDEX(POS_X)) = map.at(\"POS_X\");\n  s(S_INDEX(POS_Y)) = map.at(\"POS_Y\");\n  s(S_INDEX(CG_POS_Z)) = map.at(\"POS_Z\");\n  s(S_INDEX(VEL_X)) = map.at(\"VEL_X\");\n  float bl_v_I_z = map.at(\"VEL_Z\") * cosf(map.at(\"PITCH\")) - map.at(\"VEL_X\") * sinf(map.at(\"PITCH\"));\n  s(S_INDEX(CG_VEL_I_Z)) = bl_v_I_z - map.at(\"OMEGA_Y\") * this->params_.c_g.x;\n  s(S_INDEX(STEER_ANGLE)) = map.at(\"STEER_ANGLE\");\n  s(S_INDEX(STEER_ANGLE_RATE)) = map.at(\"STEER_ANGLE_RATE\");\n  s(S_INDEX(ROLL)) = map.at(\"ROLL\");\n  s(S_INDEX(PITCH)) = map.at(\"PITCH\");\n  s(S_INDEX(YAW)) = map.at(\"YAW\");\n  s(S_INDEX(OMEGA_Z)) = map.at(\"OMEGA_Z\");\n  // Set z position to cg's z position in world frame\n  float3 rotation = make_float3(s(S_INDEX(ROLL)), s(S_INDEX(PITCH)), s(S_INDEX(YAW)));\n  float3 world_pose = make_float3(s(S_INDEX(POS_X)), 
s(S_INDEX(POS_Y)), s(S_INDEX(CG_POS_Z)));\n  float3 cg_in_world_frame;\n  mppi::math::bodyOffsetToWorldPoseEuler(this->params_.c_g, world_pose, rotation, cg_in_world_frame);\n  s(S_INDEX(CG_POS_Z)) = cg_in_world_frame.z;\n  s(S_INDEX(ROLL_RATE)) = map.at(\"OMEGA_X\");\n  s(S_INDEX(PITCH_RATE)) = map.at(\"OMEGA_Y\");\n  s(S_INDEX(BRAKE_STATE)) = map.at(\"BRAKE_STATE\");\n\n  if (use_uncertainty)\n  {\n    s(S_INDEX(UNCERTAINTY_POS_X)) = map.at(\"UNCERTAINTY_POS_X\");\n    s(S_INDEX(UNCERTAINTY_POS_Y)) = map.at(\"UNCERTAINTY_POS_Y\");\n    s(S_INDEX(UNCERTAINTY_YAW)) = map.at(\"UNCERTAINTY_YAW\");\n    s(S_INDEX(UNCERTAINTY_VEL_X)) = map.at(\"UNCERTAINTY_VEL_X\");\n    s(S_INDEX(UNCERTAINTY_POS_X_Y)) = map.at(\"UNCERTAINTY_POS_X_Y\");\n    s(S_INDEX(UNCERTAINTY_POS_X_YAW)) = map.at(\"UNCERTAINTY_POS_X_YAW\");\n    s(S_INDEX(UNCERTAINTY_POS_X_VEL_X)) = map.at(\"UNCERTAINTY_POS_X_VEL_X\");\n    s(S_INDEX(UNCERTAINTY_POS_Y_YAW)) = map.at(\"UNCERTAINTY_POS_Y_YAW\");\n    s(S_INDEX(UNCERTAINTY_POS_Y_VEL_X)) = map.at(\"UNCERTAINTY_POS_Y_VEL_X\");\n    s(S_INDEX(UNCERTAINTY_YAW_VEL_X)) = map.at(\"UNCERTAINTY_YAW_VEL_X\");\n  }\n  float eps = 1e-6f;\n  if (s(S_INDEX(UNCERTAINTY_POS_X)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_POS_X)) = eps;\n  }\n  if (s(S_INDEX(UNCERTAINTY_POS_Y)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_POS_Y)) = eps;\n  }\n  if (s(S_INDEX(UNCERTAINTY_YAW)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_YAW)) = eps;\n  }\n  if (s(S_INDEX(UNCERTAINTY_VEL_X)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_VEL_X)) = eps;\n  }\n  return s;\n}\n\nvoid RacerDubinsElevationLSTMUncertainty::initializeDynamics(const Eigen::Ref<const state_array>& state,\n                                                             const Eigen::Ref<const control_array>& control,\n                                                             Eigen::Ref<output_array> output, float t_0, float dt)\n{\n  uncertainty_helper_->resetLSTMHiddenCellCPU();\n  mean_helper_->resetLSTMHiddenCellCPU();\n  
PARENT_CLASS::initializeDynamics(state, control, output, t_0, dt);\n}\n\nvoid RacerDubinsElevationLSTMUncertainty::step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state,\n                                               Eigen::Ref<state_array> state_der,\n                                               const Eigen::Ref<const control_array>& control,\n                                               Eigen::Ref<output_array> output, const float t, const float dt)\n{\n  // this is the new brake model with quadratics\n  bool enable_brake = control(C_INDEX(THROTTLE_BRAKE)) < 0.0f;\n  const float brake_error = (enable_brake * -control(C_INDEX(THROTTLE_BRAKE)) - state(S_INDEX(BRAKE_STATE)));\n\n  state_der(S_INDEX(BRAKE_STATE)) =\n      fminf(fmaxf((brake_error > 0) * (brake_error * this->params_.pos_quad_brake_c[0] +\n                                       brake_error * fabsf(brake_error) * this->params_.pos_quad_brake_c[1]) +\n                      (brake_error < 0) * (brake_error * this->params_.neg_quad_brake_c[0] +\n                                           brake_error * fabsf(brake_error) * this->params_.neg_quad_brake_c[1]),\n                  -this->params_.max_brake_rate_neg),\n            this->params_.max_brake_rate_pos);\n  this->computeParametricAccelDeriv(state, control, state_der, dt);\n  this->computeLSTMSteering(state, control, state_der);\n  this->computeSimpleSuspensionStep(state, state_der, output);\n\n  // only on in reverse, default back to parametric in reverse\n  if (this->params_.gear_sign == 1)\n  {\n    // compute the mean LSTM\n    Eigen::VectorXf input = mean_helper_->getLSTMModel()->getZeroInputVector();\n    input(0) = state(S_INDEX(VEL_X));\n    input(1) = state(S_INDEX(OMEGA_Z));\n    input(2) = state(S_INDEX(BRAKE_STATE));\n    input(3) = state(S_INDEX(STEER_ANGLE));\n    input(4) = state(S_INDEX(STEER_ANGLE_RATE));\n    input(5) = control(C_INDEX(THROTTLE_BRAKE)) >= 0.0f ? 
control(C_INDEX(THROTTLE_BRAKE)) : 0.0f;\n    input(6) = control(C_INDEX(THROTTLE_BRAKE)) <= 0.0f ? -control(C_INDEX(THROTTLE_BRAKE)) : 0.0f;\n    input(7) = control(C_INDEX(STEER_CMD));\n    input(8) = sinf(state(S_INDEX(STATIC_PITCH)));\n    input(9) = state_der(S_INDEX(VEL_X));\n    input(10) = state_der(S_INDEX(YAW));\n    Eigen::VectorXf mean_output = mean_helper_->getLSTMModel()->getZeroOutputVector();\n    mean_helper_->forward(input, mean_output);\n    state_der(S_INDEX(VEL_X)) += mean_output(0);\n    state_der(S_INDEX(YAW)) += mean_output(1);\n  }\n  next_state[S_INDEX(OMEGA_Z)] = state_der[S_INDEX(YAW)];\n\n  // Integrate using racer_dubins updateState\n  updateState(state, next_state, state_der, dt);\n  SharedBlock sb;\n  computeUncertaintyPropagation(state.data(), control.data(), state_der.data(), next_state.data(), dt, &this->params_,\n                                &sb, nullptr);\n  float roll = state(S_INDEX(STATIC_ROLL));\n  float pitch = state(S_INDEX(STATIC_PITCH));\n  RACER::computeStaticSettling<typename DYN_PARAMS_T::OutputIndex, TwoDTextureHelper<float>>(\n      this->tex_helper_, next_state(S_INDEX(YAW)), next_state(S_INDEX(POS_X)), next_state(S_INDEX(POS_Y)), roll, pitch,\n      output.data());\n  next_state[S_INDEX(STATIC_PITCH)] = pitch;\n  next_state[S_INDEX(STATIC_ROLL)] = roll;\n\n  this->setOutputs(state_der.data(), next_state.data(), output.data());\n}\n\n__device__ __host__ bool RacerDubinsElevationLSTMUncertainty::computeQ(const float* state, const float* control,\n                                                                       const float* state_der, float* Q,\n                                                                       RacerDubinsElevationUncertaintyParams* params_p,\n                                                                       float* theta_s)\n{\n  // in reverse just use base level implementation\n  if (params_p->gear_sign == -1)\n  {\n    PARENT_CLASS::computeQ(state, control, state_der, Q, params_p, 
theta_s);\n  }\n  // TODO make the roll and pitch the ones from static settling, not the ones calculated online\n  float sin_yaw, cos_yaw;\n  float* uncertainty_output;\n  int step, pi;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_Y>(pi, step);\n#ifdef __CUDA_ARCH__\n\n  const float yaw_norm = angle_utils::normalizeAngle(state[S_INDEX(YAW)]);\n  __sincosf(yaw_norm, &sin_yaw, &cos_yaw);\n\n  const int grd_shift = this->SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float);  // grid size to shift by\n  const int blk_shift = this->SHARED_MEM_REQUEST_BLK_BYTES * (threadIdx.x + blockDim.x * threadIdx.z) /\n                        sizeof(float);  // blk size to shift by\n  const int sb_shift =\n      sizeof(SharedBlock) / sizeof(float) + network_d_->getBlkSharedSizeBytes() / sizeof(float) +\n      mean_d_->getBlkSharedSizeBytes() / sizeof(float);  // how much to shift inside a block to lstm values\n\n  float* input_loc = uncertainty_d_->getInputLocation(theta_s, grd_shift, blk_shift, sb_shift);\n\n  for (int i = pi; i < uncertainty_d_->getInputDim(); i += step)\n  {\n    switch (i)\n    {\n      case 0:  // vx\n        input_loc[i] = state[S_INDEX(VEL_X)];\n        break;\n      case 1:  // omega\n        input_loc[i] = state[S_INDEX(OMEGA_Z)];\n        break;\n      case 2:  // brake state\n        input_loc[i] = state[S_INDEX(BRAKE_STATE)];\n        break;\n      case 3:  // shaft angle\n        input_loc[i] = state[S_INDEX(STEER_ANGLE)];\n        break;\n      case 4:  // shaft velocity\n        input_loc[i] = state[S_INDEX(STEER_ANGLE_RATE)];\n        break;\n      case 5:  // throttle_cmd\n        input_loc[i] = control[C_INDEX(THROTTLE_BRAKE)] >= 0.0f ? control[C_INDEX(THROTTLE_BRAKE)] : 0.0f;\n        break;\n      case 6:  // brake_cmd\n        input_loc[i] = control[C_INDEX(THROTTLE_BRAKE)] <= 0.0f ? 
-control[C_INDEX(THROTTLE_BRAKE)] : 0.0f;\n        break;\n      case 7:  // steering cmd\n        input_loc[i] = control[C_INDEX(STEER_CMD)];\n        break;\n      case 8:  // sin roll\n        input_loc[i] = __sinf(state[S_INDEX(STATIC_ROLL)]);\n        break;\n      case 9:  // sin pitch\n        input_loc[i] = __sinf(state[S_INDEX(STATIC_PITCH)]);\n        break;\n      case 10:  // accel x\n        input_loc[i] = state_der[S_INDEX(VEL_X)];\n        break;\n      case 11:  // yaw rate calculation\n        input_loc[i] = state_der[S_INDEX(YAW)];\n        break;\n    }\n  }\n  __syncthreads();\n\n  float* cur_hidden_cell = uncertainty_d_->getHiddenCellLocation(theta_s, grd_shift, blk_shift, sb_shift);\n  uncertainty_output = uncertainty_d_->forward(\n      nullptr, theta_s + (network_d_->getGrdSharedSizeBytes() + mean_d_->getGrdSharedSizeBytes()) / sizeof(float),\n      cur_hidden_cell);\n  int output_dim = uncertainty_d_->getOutputDim();\n#else\n  sincosf(state[S_INDEX(YAW)], &sin_yaw, &cos_yaw);\n\n  Eigen::VectorXf input = uncertainty_helper_->getLSTMModel()->getZeroInputVector();\n  input(0) = state[S_INDEX(VEL_X)];\n  input(1) = state[S_INDEX(OMEGA_Z)];\n  input(2) = state[S_INDEX(BRAKE_STATE)];\n  input(3) = state[S_INDEX(STEER_ANGLE)];\n  input(4) = state[S_INDEX(STEER_ANGLE_RATE)];\n  input(5) = control[C_INDEX(THROTTLE_BRAKE)] >= 0.0f ? control[C_INDEX(THROTTLE_BRAKE)] : 0.0f;\n  input(6) = control[C_INDEX(THROTTLE_BRAKE)] < 0.0f ? 
-control[C_INDEX(THROTTLE_BRAKE)] : 0.0f;\n  input(7) = control[C_INDEX(STEER_CMD)];\n  input(8) = sinf(state[S_INDEX(STATIC_ROLL)]);\n  input(9) = sinf(state[S_INDEX(STATIC_PITCH)]);\n  input(10) = state_der[S_INDEX(VEL_X)];  // these should be using the modified means\n  input(11) = state_der[S_INDEX(YAW)];\n  Eigen::VectorXf uncertainty_output_eig = uncertainty_helper_->getLSTMModel()->getZeroOutputVector();\n  uncertainty_helper_->forward(input, uncertainty_output_eig);\n\n  // TODO need to sigmoid and scale\n  uncertainty_output = uncertainty_output_eig.data();\n\n  int output_dim = uncertainty_helper_->getLSTMModel()->getOutputDim();\n#endif\n\n  for (int i = pi; i < output_dim; i += step)\n  {\n    uncertainty_output[i] = fabsf(mppi::nn::sigmoid(uncertainty_output[i]) * params_p->unc_scale[i]);\n  }\n#ifdef __CUDA_ARCH__\n  __syncthreads();\n#endif\n\n  const float linear_brake_slope = 0.2f;\n  const int index = (fabsf(state[S_INDEX(VEL_X)]) > linear_brake_slope && fabsf(state[S_INDEX(VEL_X)]) <= 3.0f) +\n                    (fabsf(state[S_INDEX(VEL_X)]) > 3.0f) * 2;\n  // TODO add in additional uncertainty based on output dim of network\n  for (int i = pi; i < UNCERTAINTY_DIM * UNCERTAINTY_DIM; i += step)\n  {\n    switch (i)\n    {\n      // vel_x\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        Q[i] = uncertainty_output[0] +\n               SQ(params_p->c_b[index] * (index == 0 ? 
state[S_INDEX(VEL_X)] : 1.0f)) * uncertainty_output[4];\n        if (output_dim == 7)\n        {\n          Q[i] += uncertainty_output[5];  // additional Q_vx\n        }\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(YAW), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(VEL_X), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n        // yaw\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(YAW), UNCERTAINTY_DIM):\n#ifdef __CUDA_ARCH__\n        Q[i] =\n            uncertainty_output[1] +\n            SQ((state[S_INDEX(VEL_X)] / params_p->wheel_base) * 1.0f /\n               (SQ(__cosf(state[S_INDEX(STEER_ANGLE)] / params_p->steer_angle_scale)) * params_p->steer_angle_scale)) *\n                uncertainty_output[3];\n#else\n        Q[i] = uncertainty_output[1] +\n               SQ((state[S_INDEX(VEL_X)] / params_p->wheel_base) * 1.0f /\n                  (SQ(cosf(state[S_INDEX(STEER_ANGLE)] / params_p->steer_angle_scale)) * params_p->steer_angle_scale)) *\n                   uncertainty_output[3];\n#endif\n        if (output_dim == 7)\n        {\n          Q[i] += uncertainty_output[6];  // additional Q_yaw\n        }\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(YAW), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n        // pos x\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(YAW), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      
case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        Q[i] = uncertainty_output[2] * sin_yaw * sin_yaw;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_X), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        Q[i] = -uncertainty_output[2] * sin_yaw * cos_yaw;\n        break;\n        // pos y\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(VEL_X), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(YAW), UNCERTAINTY_DIM):\n        Q[i] = 0.0f;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_Y), UNCERTAINTY_DIM):\n        Q[i] = uncertainty_output[2] * cos_yaw * cos_yaw;\n        break;\n      case mm::columnMajorIndex(U_INDEX(POS_Y), U_INDEX(POS_X), UNCERTAINTY_DIM):\n        Q[i] = -uncertainty_output[2] * sin_yaw * cos_yaw;\n        break;\n    }\n  }\n  return true;\n}\n\n__device__ void RacerDubinsElevationLSTMUncertainty::step(float* state, float* next_state, float* state_der,\n                                                          float* control, float* output, float* theta_s, const float t,\n                                                          const float dt)\n{\n  DYN_PARAMS_T* params_p = &(this->params_);\n  const int grd_shift = this->SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float);  // grid size to shift by\n  const int blk_shift = this->SHARED_MEM_REQUEST_BLK_BYTES * (threadIdx.x + blockDim.x * threadIdx.z) /\n                        sizeof(float);  // blk size to shift by\n  float* sb_mem = &theta_s[grd_shift];  // does the grid shift\n  SharedBlock* sb = (SharedBlock*)(sb_mem + blk_shift);\n  const int sb_shift =\n      sizeof(SharedBlock) / sizeof(float) +\n      network_d_->getBlkSharedSizeBytes() / sizeof(float);  // how much to shift inside a block to lstm values\n\n  // this is the new brake model with quadratics\n  bool enable_brake = control[C_INDEX(THROTTLE_BRAKE)] < 0.0f;\n  const float brake_error = 
(enable_brake * -control[C_INDEX(THROTTLE_BRAKE)] - state[S_INDEX(BRAKE_STATE)]);\n\n  state_der[S_INDEX(BRAKE_STATE)] =\n      fminf(fmaxf((brake_error > 0) * (brake_error * params_p->pos_quad_brake_c[0] +\n                                       brake_error * fabsf(brake_error) * params_p->pos_quad_brake_c[1]) +\n                      (brake_error < 0) * (brake_error * params_p->neg_quad_brake_c[0] +\n                                           brake_error * fabsf(brake_error) * params_p->neg_quad_brake_c[1]),\n                  -params_p->max_brake_rate_neg),\n            params_p->max_brake_rate_pos);\n  computeParametricAccelDeriv(state, control, state_der, dt, params_p);\n  computeLSTMSteering(state, control, state_der, params_p, theta_s, grd_shift, blk_shift,\n                      sizeof(SharedBlock) / sizeof(float));\n  computeSimpleSuspensionStep(state, state_der, output, params_p, theta_s);\n\n  // TODO use the settling pitch and roll not the state based one\n  // only on in reverse, default back to parametric in reverse\n  if (params_p->gear_sign == 1)\n  {\n    // computes the mean compensation LSTM\n    float* input_loc = mean_d_->getInputLocation(theta_s, grd_shift, blk_shift, sb_shift);\n    int pi, step;\n    mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(pi, step);\n    for (int i = pi; i < mean_d_->getInputDim(); i += step)\n    {\n      switch (i)\n      {\n        case 0:  // vx\n          input_loc[i] = state[S_INDEX(VEL_X)];\n          break;\n        case 1:  // omega\n          input_loc[i] = state[S_INDEX(OMEGA_Z)];\n          break;\n        case 2:  // brake state\n          input_loc[i] = state[S_INDEX(BRAKE_STATE)];\n          break;\n        case 3:  // shaft angle\n          input_loc[i] = state[S_INDEX(STEER_ANGLE)];\n          break;\n        case 4:  // shaft velocity\n          input_loc[i] = state[S_INDEX(STEER_ANGLE_RATE)];\n          break;\n        case 5:  // throttle_cmd\n          input_loc[i] = 
control[C_INDEX(THROTTLE_BRAKE)] >= 0.0f ? control[C_INDEX(THROTTLE_BRAKE)] : 0.0f;\n          break;\n        case 6:  // brake_cmd\n          input_loc[i] = control[C_INDEX(THROTTLE_BRAKE)] <= 0.0f ? -control[C_INDEX(THROTTLE_BRAKE)] : 0.0f;\n          break;\n        case 7:  // steering cmd\n          input_loc[i] = control[C_INDEX(STEER_CMD)];\n          break;\n        case 8:  // sin pitch\n#ifdef __CUDA_ARCH__\n          input_loc[i] = __sinf(state[S_INDEX(STATIC_PITCH)]);\n#else\n          input_loc[i] = sinf(state[S_INDEX(STATIC_PITCH)]);\n#endif\n          break;\n        case 9:  // accel x\n          input_loc[i] = state_der[S_INDEX(VEL_X)];\n          break;\n        case 10:  // yaw rate calculation\n          input_loc[i] = state_der[S_INDEX(YAW)];\n          break;\n      }\n    }\n    __syncthreads();\n\n    float* cur_hidden_cell = mean_d_->getHiddenCellLocation(theta_s, grd_shift, blk_shift, sb_shift);\n    float* mean_output =\n        mean_d_->forward(nullptr, theta_s + network_d_->getGrdSharedSizeBytes() / sizeof(float), cur_hidden_cell);\n\n    if (threadIdx.y == 0)\n    {\n      state_der[S_INDEX(VEL_X)] += mean_output[0];\n      state_der[S_INDEX(YAW)] += mean_output[1];\n      next_state[S_INDEX(OMEGA_Z)] = state_der[S_INDEX(YAW)];\n    }\n    __syncthreads();\n  }\n  next_state[S_INDEX(OMEGA_Z)] = state_der[S_INDEX(YAW)];\n\n  updateState(state, next_state, state_der, dt);\n  computeUncertaintyPropagation(state, control, state_der, next_state, dt, params_p, sb, theta_s);\n\n  float roll = state[S_INDEX(STATIC_ROLL)];\n  float pitch = state[S_INDEX(STATIC_PITCH)];\n  RACER::computeStaticSettling<DYN_PARAMS_T::OutputIndex, TwoDTextureHelper<float>>(\n      this->tex_helper_, next_state[S_INDEX(YAW)], next_state[S_INDEX(POS_X)], next_state[S_INDEX(POS_Y)], roll, pitch,\n      output);\n  next_state[S_INDEX(STATIC_PITCH)] = pitch;\n  next_state[S_INDEX(STATIC_ROLL)] = roll;\n  //__syncthreads();\n  this->setOutputs(state_der, next_state, 
output);\n}\n\n__device__ void RacerDubinsElevationLSTMUncertainty::initializeDynamics(float* state, float* control, float* output,\n                                                                        float* theta_s, float t_0, float dt)\n{\n  PARENT_CLASS::initializeDynamics(state, control, output, theta_s, t_0, dt);\n  int blk_shift = sizeof(SharedBlock) / sizeof(float) + network_d_->getBlkSharedSizeBytes() / sizeof(float);\n  mean_d_->initialize(theta_s, this->SHARED_MEM_REQUEST_BLK_BYTES, this->SHARED_MEM_REQUEST_GRD_BYTES, blk_shift,\n                      network_d_->getGrdSharedSizeBytes());\n  blk_shift = sizeof(SharedBlock) / sizeof(float) + network_d_->getBlkSharedSizeBytes() / sizeof(float) +\n              mean_d_->getBlkSharedSizeBytes() / sizeof(float);\n  uncertainty_d_->initialize(theta_s, this->SHARED_MEM_REQUEST_BLK_BYTES, this->SHARED_MEM_REQUEST_GRD_BYTES, blk_shift,\n                             network_d_->getGrdSharedSizeBytes() + mean_d_->getGrdSharedSizeBytes());\n}\n"
  },
  {
    "path": "include/mppi/dynamics/racer_dubins/racer_dubins_elevation_lstm_unc.cuh",
    "content": "#include \"racer_dubins_elevation_suspension_lstm.cuh\"\n\n#ifndef MPPIGENERIC_RACER_DUBINS_ELEVATION_LSTM_UNC_CUH\n#define MPPIGENERIC_RACER_DUBINS_ELEVATION_LSTM_UNC_CUH\n\nstruct RacerDubinsElevationUncertaintyParams : public RacerDubinsElevationSuspensionParams\n{\n  enum class StateIndex : int\n  {\n    VEL_X = 0,\n    YAW,\n    POS_X,\n    POS_Y,\n    STEER_ANGLE,\n    BRAKE_STATE,\n    ROLL,\n    PITCH,\n    CG_POS_Z,\n    CG_VEL_I_Z,\n    ROLL_RATE,\n    PITCH_RATE,\n    STEER_ANGLE_RATE,\n    OMEGA_Z,\n    STATIC_ROLL,\n    STATIC_PITCH,\n    UNCERTAINTY_POS_X,\n    UNCERTAINTY_POS_Y,\n    UNCERTAINTY_YAW,\n    UNCERTAINTY_VEL_X,\n    UNCERTAINTY_POS_X_Y,\n    UNCERTAINTY_POS_X_YAW,\n    UNCERTAINTY_POS_X_VEL_X,\n    UNCERTAINTY_POS_Y_YAW,\n    UNCERTAINTY_POS_Y_VEL_X,\n    UNCERTAINTY_YAW_VEL_X,\n    NUM_STATES\n  };\n\n  float unc_scale[7] = { 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f };\n  float pos_quad_brake_c[3] = { 2.0f, 0.5f, 0.3 };\n  float neg_quad_brake_c[3] = { 5.84f, 0.15f, 1.7f };\n  bool use_static_settling = true;\n};\n\nclass RacerDubinsElevationLSTMUncertainty\n  : public RacerDubinsElevationSuspensionImpl<RacerDubinsElevationLSTMUncertainty,\n                                              RacerDubinsElevationUncertaintyParams>\n{\npublic:\n  using PARENT_CLASS =\n      RacerDubinsElevationSuspensionImpl<RacerDubinsElevationLSTMUncertainty, RacerDubinsElevationUncertaintyParams>;\n\n  typedef typename PARENT_CLASS::DYN_PARAMS_T DYN_PARAMS_T;\n\n  typedef typename PARENT_CLASS::state_array state_array;\n  typedef typename PARENT_CLASS::control_array control_array;\n  typedef typename PARENT_CLASS::output_array output_array;\n  typedef typename PARENT_CLASS::dfdx dfdx;\n  typedef typename PARENT_CLASS::dfdu dfdu;\n\n  RacerDubinsElevationLSTMUncertainty(LSTMLSTMConfig& steer_config, LSTMLSTMConfig& mean_config,\n                                      LSTMLSTMConfig& unc_config, cudaStream_t stream = nullptr);\n  
RacerDubinsElevationLSTMUncertainty(std::string path, cudaStream_t stream = nullptr);\n\n  std::string getDynamicsModelName() const override\n  {\n    // TODO check if mean model is none or not\n    return \"RACER Dubins LSTM Uncertainty Model\";\n  }\n\n  void GPUSetup();\n  void bindToStream(cudaStream_t stream);\n  void freeCudaMem();\n\n  bool updateFromBuffer(const buffer_trajectory& buffer);\n\n  void step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state, Eigen::Ref<state_array> state_der,\n            const Eigen::Ref<const control_array>& control, Eigen::Ref<output_array> output, const float t,\n            const float dt);\n\n  void initializeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                          Eigen::Ref<output_array> output, float t_0, float dt);\n  state_array stateFromMap(const std::map<std::string, float>& map);\n\n  __host__ __device__ bool computeQ(const float* state, const float* control, const float* state_der, float* Q,\n                                    DYN_PARAMS_T* params_p, float* theta_s);\n\n  __device__ inline void step(float* state, float* next_state, float* state_der, float* control, float* output,\n                              float* theta_s, const float t, const float dt);\n\n  __device__ void initializeDynamics(float* state, float* control, float* output, float* theta_s, float t_0, float dt);\n\n  // __device__ void updateState(float* state, float* next_state, float* state_der, const float dt);\n\n  // void updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n  //                  Eigen::Ref<state_array> state_der, const float dt);\n\n  std::shared_ptr<LSTMLSTMHelper<>> getUncertaintyHelper()\n  {\n    return uncertainty_helper_;\n  }\n  std::shared_ptr<LSTMLSTMHelper<>> getMeanHelper()\n  {\n    return mean_helper_;\n  }\n\n  LSTMHelper<>* uncertainty_d_ = nullptr;\n  LSTMHelper<>* mean_d_ = 
nullptr;\n\nprotected:\n  std::shared_ptr<LSTMLSTMHelper<>> uncertainty_helper_;\n  std::shared_ptr<LSTMLSTMHelper<>> mean_helper_;\n};\n\n#if __CUDACC__\n#include \"racer_dubins_elevation_lstm_unc.cu\"\n#endif\n\n#endif  // MPPIGENERIC_RACER_DUBINS_ELEVATION_LSTM_UNC_CUH\n"
  },
  {
    "path": "include/mppi/dynamics/racer_dubins/racer_dubins_elevation_suspension_lstm.cu",
    "content": "//\n// Created by Bogdan on 02/21/2024\n//\n\n#include <eigen3/Eigen/src/Geometry/Quaternion.h>\n#include \"racer_dubins_elevation_suspension_lstm.cuh\"\n\n#define TEMPLATE_TYPE template <class CLASS_T, class PARAMS_T>\n#define TEMPLATE_NAME RacerDubinsElevationSuspensionImpl<CLASS_T, PARAMS_T>\n\nTEMPLATE_TYPE\nTEMPLATE_NAME::RacerDubinsElevationSuspensionImpl(int init_input_dim, int init_hidden_dim,\n                                                  std::vector<int>& init_output_layers, int input_dim, int hidden_dim,\n                                                  std::vector<int>& output_layers, int init_len, cudaStream_t stream)\n  : PARENT_CLASS(init_input_dim, init_hidden_dim, init_output_layers, input_dim, hidden_dim, output_layers, init_len,\n                 stream)\n{\n  normals_tex_helper_ = new TwoDTextureHelper<float4>(1, stream);\n}\n\nTEMPLATE_TYPE\nTEMPLATE_NAME::RacerDubinsElevationSuspensionImpl(std::string path, cudaStream_t stream) : PARENT_CLASS(path, stream)\n{\n  normals_tex_helper_ = new TwoDTextureHelper<float4>(1, stream);\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::bindToStream(cudaStream_t stream)\n{\n  normals_tex_helper_->bindToStream(stream);\n  PARENT_CLASS::bindToStream(stream);\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::paramsToDevice()\n{\n  normals_tex_helper_->copyToDevice();\n  PARENT_CLASS::paramsToDevice();\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::GPUSetup()\n{\n  PARENT_CLASS::GPUSetup();\n\n  normals_tex_helper_->GPUSetup();\n  // makes sure that the device ptr sees the correct texture object\n  HANDLE_ERROR(cudaMemcpyAsync(&(this->model_d_->normals_tex_helper_), &(normals_tex_helper_->ptr_d_),\n                               sizeof(TwoDTextureHelper<float4>*), cudaMemcpyHostToDevice, this->stream_));\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::freeCudaMem()\n{\n  normals_tex_helper_->freeCudaMem();\n  PARENT_CLASS::freeCudaMem();\n}\n\nTEMPLATE_TYPE\nvoid 
TEMPLATE_NAME::computeSimpleSuspensionStep(Eigen::Ref<state_array> state, Eigen::Ref<state_array> state_der,\n                                                Eigen::Ref<output_array> output)\n{\n  DYN_PARAMS_T* params_p = &(this->params_);\n\n  // Calculate suspension-based state derivatives\n  const float& x = state(S_INDEX(POS_X));\n  const float& y = state(S_INDEX(POS_Y));\n  const float& roll = state(S_INDEX(ROLL));\n  const float& pitch = state(S_INDEX(PITCH));\n  const float& yaw = state(S_INDEX(YAW));\n  float3 wheel_positions_body[W_INDEX(NUM_WHEELS)];\n  float3 wheel_positions_world[W_INDEX(NUM_WHEELS)];\n  float3 wheel_positions_cg[W_INDEX(NUM_WHEELS)];\n  wheel_positions_body[W_INDEX(FR)] = make_float3(2.981f, -0.737f, 0.f);\n  wheel_positions_body[W_INDEX(FL)] = make_float3(2.981f, 0.737f, 0.0f);\n  wheel_positions_body[W_INDEX(BR)] = make_float3(0.0f, 0.737f, 0.0f);\n  wheel_positions_body[W_INDEX(BL)] = make_float3(0.0f, -0.737f, 0.f);\n\n  float3 body_pose = make_float3(x, y, 0.0f);\n  // rotation matrix representation\n  // float3 rotation = make_float3(roll, pitch, yaw);\n  Eigen::Matrix3f M;\n  mppi::math::Euler2DCM_NWU(roll, pitch, yaw, M);\n  float wheel_pos_z, wheel_vel_z;\n  float wheel_height = 0.0f;\n  float4 wheel_normal_world = make_float4(0.0f, 0.0f, 1.0f, 0.0f);\n  int pi, step;\n\n  state_der(S_INDEX(ROLL)) = state(S_INDEX(ROLL_RATE));\n  state_der(S_INDEX(PITCH)) = state(S_INDEX(PITCH_RATE));\n  state_der(S_INDEX(CG_POS_Z)) = state(S_INDEX(CG_VEL_I_Z));\n  state_der(S_INDEX(CG_VEL_I_Z)) = 0.0f;\n  state_der(S_INDEX(ROLL_RATE)) = 0.0f;\n  state_der(S_INDEX(PITCH_RATE)) = 0.0f;\n  output(O_INDEX(WHEEL_FORCE_UP_MAX)) = -std::numeric_limits<float>::max();\n  output(O_INDEX(WHEEL_FORCE_FWD_MAX)) = -std::numeric_limits<float>::max();\n  output(O_INDEX(WHEEL_FORCE_SIDE_MAX)) = -std::numeric_limits<float>::max();\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(pi, step);\n\n  float wheel_yaw, sin_wheel_yaw, cos_wheel_yaw;\n  
for (int i = pi; i < W_INDEX(NUM_WHEELS); i += step)\n  {\n    // Calculate wheel position in different frames\n    wheel_positions_cg[i] = wheel_positions_body[i] - params_p->c_g;\n    mppi::math::bodyOffsetToWorldPoseDCM(wheel_positions_body[i], body_pose, M, wheel_positions_world[i]);\n    if (this->tex_helper_->checkTextureUse(0))\n    {\n      wheel_height = this->tex_helper_->queryTextureAtWorldPose(0, wheel_positions_world[i]);\n      if (!isfinite(wheel_height))\n      {\n        wheel_height = state(S_INDEX(CG_POS_Z)) - params_p->wheel_radius;\n      }\n    }\n    if (normals_tex_helper_->checkTextureUse(0))\n    {\n      wheel_normal_world = normals_tex_helper_->queryTextureAtWorldPose(0, wheel_positions_world[i]);\n      if (!isfinite(wheel_normal_world.x) || !isfinite(wheel_normal_world.y) || !isfinite(wheel_normal_world.z))\n      {\n        wheel_normal_world = make_float4(0.0, 0.0, 1.0, 0.0);\n      }\n    }\n\n    if (i == W_INDEX(FR) || i == W_INDEX(FL))\n    {\n      wheel_yaw = yaw + S_INDEX(STEER_ANGLE) / -9.1f;\n    }\n    else\n    {\n      wheel_yaw = yaw;\n    }\n    sincosf(wheel_yaw, &sin_wheel_yaw, &cos_wheel_yaw);\n\n    // Calculate wheel heights, velocities, and forces\n    wheel_pos_z = state(S_INDEX(CG_POS_Z)) + roll * wheel_positions_cg[i].y - pitch * wheel_positions_cg[i].x -\n                  params_p->wheel_radius;\n    wheel_vel_z = state(S_INDEX(CG_VEL_I_Z)) + state(S_INDEX(ROLL_RATE)) * wheel_positions_cg[i].y -\n                  state(S_INDEX(PITCH_RATE)) * wheel_positions_cg[i].x;\n\n    // h_dot = - V_I * N_I\n    float h_dot = -(state[S_INDEX(VEL_X)] * cos_wheel_yaw * wheel_normal_world.x +\n                    state[S_INDEX(VEL_X)] * sin_wheel_yaw * wheel_normal_world.y);\n\n    float wheel_force = -params_p->spring_k * (wheel_pos_z - wheel_height) - params_p->drag_c * (wheel_vel_z - h_dot);\n    float up_wheel_force = wheel_force;  // + params_p->mass * (9.81f / 4.0f);\n    float fwd_wheel_force =\n        wheel_force 
/ wheel_normal_world.z *\n        (wheel_normal_world.x * cos_wheel_yaw + wheel_normal_world.y * sin_wheel_yaw + wheel_normal_world.z * (-pitch));\n    float side_wheel_force =\n        wheel_force / wheel_normal_world.z *\n        (-wheel_normal_world.x * sin_wheel_yaw + wheel_normal_world.y * cos_wheel_yaw + wheel_normal_world.z * roll);\n    output(O_INDEX(WHEEL_FORCE_UP_MAX)) = fmaxf(output(O_INDEX(WHEEL_FORCE_UP_MAX)), up_wheel_force);\n    output(O_INDEX(WHEEL_FORCE_FWD_MAX)) = fmaxf(output(O_INDEX(WHEEL_FORCE_FWD_MAX)), fabsf(fwd_wheel_force));\n    output(O_INDEX(WHEEL_FORCE_SIDE_MAX)) = fmaxf(output(O_INDEX(WHEEL_FORCE_SIDE_MAX)), fabsf(side_wheel_force));\n    // output(O_INDEX(WHEEL_FORCE_UP_FL) + i) = up_wheel_force;\n    // output(O_INDEX(WHEEL_FORCE_FWD_FL) + i) = fwd_wheel_force;\n    // output(O_INDEX(WHEEL_FORCE_SIDE_FL) + i) = side_wheel_force;\n    state_der(S_INDEX(CG_VEL_I_Z)) += wheel_force / params_p->mass;\n    state_der(S_INDEX(ROLL_RATE)) += wheel_force * wheel_positions_cg[i].y / params_p->I_xx;\n    state_der(S_INDEX(PITCH_RATE)) += -wheel_force * wheel_positions_cg[i].x / params_p->I_yy;\n  }\n\n  if (output(O_INDEX(WHEEL_FORCE_UP_MAX)) == 0.0f)\n  {\n    std::cout << \"got state: \" << state.transpose() << std::endl;\n  }\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state,\n                         Eigen::Ref<state_array> state_der, const Eigen::Ref<const control_array>& control,\n                         Eigen::Ref<output_array> output, const float t, const float dt)\n{\n  this->computeParametricDelayDeriv(state, control, state_der);\n  this->computeParametricAccelDeriv(state, control, state_der, dt);\n  this->computeLSTMSteering(state, control, state_der);\n\n  computeSimpleSuspensionStep(state, state_der, output);\n\n  // Integrate using Euler Integration\n  updateState(state, next_state, state_der, dt);\n  SharedBlock sb;\n  computeUncertaintyPropagation(state.data(), 
control.data(), state_der.data(), next_state.data(), dt, &this->params_,\n                                &sb, nullptr);\n\n  // float roll = state(S_INDEX(ROLL));\n  // float pitch = state(S_INDEX(PITCH));\n  // RACER::computeStaticSettling<typename DYN_PARAMS_T::OutputIndex, TwoDTextureHelper<float>>(\n  //     this->tex_helper_, next_state(S_INDEX(YAW)), next_state(S_INDEX(POS_X)), next_state(S_INDEX(POS_Y)), roll,\n  //     pitch, output.data());\n  // next_state[S_INDEX(PITCH)] = pitch;\n  // next_state[S_INDEX(ROLL)] = roll;\n\n  this->setOutputs(state_der.data(), next_state.data(), output.data());\n  // printf(\"CPU t: %3.0f, VEL_Z(t + 1): %f, VEL_Z(t): %f, VEl_Z'(t): %f\\n\", t, next_state(S_INDEX(CG_VEL_I_Z)),\n  // state(S_INDEX(CG_VEL_I_Z)),\n  //     state_der(S_INDEX(CG_VEL_I_Z)));\n}\n\nTEMPLATE_TYPE\n__device__ void TEMPLATE_NAME::computeSimpleSuspensionStep(float* state, float* state_der, float* output,\n                                                           DYN_PARAMS_T* params_p, float* theta_s)\n{\n  // computes the velocity dot\n  int pi, step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(pi, step);\n\n  if (pi == 0)\n  {\n    state_der[S_INDEX(CG_VEL_I_Z)] = 0.0f;\n    state_der[S_INDEX(ROLL_RATE)] = 0.0f;\n    state_der[S_INDEX(PITCH_RATE)] = 0.0f;\n    state_der[S_INDEX(ROLL)] = state[S_INDEX(ROLL_RATE)];\n    state_der[S_INDEX(PITCH)] = state[S_INDEX(PITCH_RATE)];\n    state_der[S_INDEX(CG_POS_Z)] = state[S_INDEX(CG_VEL_I_Z)];\n    output[O_INDEX(WHEEL_FORCE_UP_MAX)] = 0.0f;\n    output[O_INDEX(WHEEL_FORCE_FWD_MAX)] = 0.0f;\n    output[O_INDEX(WHEEL_FORCE_SIDE_MAX)] = 0.0f;\n  }\n  const int grd_shift = this->SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float);  // grid size to shift by\n  const int blk_shift = this->SHARED_MEM_REQUEST_BLK_BYTES * (threadIdx.x + blockDim.x * threadIdx.z) /\n                        sizeof(float);  // blk size to shift by\n  // uses same memory as compute space for the uncertainty\n  float* 
wheel_force_up = theta_s + grd_shift + blk_shift;\n  float* wheel_force_fwd = theta_s + grd_shift + blk_shift + 4;\n  float* wheel_force_side = theta_s + grd_shift + blk_shift + 8;\n  __syncthreads();\n\n  // Calculate suspension-based state derivatives\n  const float& x = state[S_INDEX(POS_X)];\n  const float& y = state[S_INDEX(POS_Y)];\n  const float& roll = state[S_INDEX(ROLL)];\n  const float& pitch = state[S_INDEX(PITCH)];\n  const float& yaw = state[S_INDEX(YAW)];\n  float3 wheel_positions_body;\n  float3 wheel_positions_world;\n\n  float3 body_pose = make_float3(x, y, 0.0f);\n  float3 rotation = make_float3(roll, pitch, yaw);\n  // rotation matrix representation\n  // TODO Check if M needs to be in shared memory\n  float M[3][3];\n  mppi::math::Euler2DCM_NWU(roll, pitch, yaw, M);\n  // mppi::math::Euler2DCM_NWU(rotation.x, rotation.y, rotation.z, M);\n  float wheel_pos_z, wheel_vel_z;\n  float wheel_height = 0.0f;\n  float h_dot = 0.0f;\n  float4 wheel_normal_world = make_float4(0.0f, 0.0f, 1.0f, 0.0f);\n  float3 wheel_positions_cg;\n  float wheel_yaw, cos_wheel_yaw, sin_wheel_yaw;\n\n  __syncthreads();\n  for (int i = pi; i < W_INDEX(NUM_WHEELS); i += step)\n  {\n    wheel_yaw = yaw;\n    // get body frame wheel positions\n    switch (i)\n    {\n      case W_INDEX(FR):\n        wheel_positions_body = make_float3(2.981f, -0.737f, 0.f);\n        wheel_yaw += S_INDEX(STEER_ANGLE) / -9.1f;\n        break;\n      case W_INDEX(FL):\n        wheel_positions_body = make_float3(2.981f, 0.737f, 0.0f);\n        wheel_yaw += S_INDEX(STEER_ANGLE) / -9.1f;\n        break;\n      case W_INDEX(BR):\n        wheel_positions_body = make_float3(0.0f, 0.737f, 0.0f);\n        break;\n      case W_INDEX(BL):\n        wheel_positions_body = make_float3(0.0f, -0.737f, 0.f);\n        break;\n      default:\n        break;\n    }\n    __sincosf(wheel_yaw, &sin_wheel_yaw, &cos_wheel_yaw);\n\n    // Calculate wheel position in different frames\n    wheel_positions_cg = 
wheel_positions_body - params_p->c_g;\n    mppi::math::bodyOffsetToWorldPoseDCM(wheel_positions_body, body_pose, M, wheel_positions_world);\n    if (this->tex_helper_->checkTextureUse(0))\n    {\n      wheel_height = this->tex_helper_->queryTextureAtWorldPose(0, wheel_positions_world);\n      if (!isfinite(wheel_height))\n      {\n        wheel_height = state[S_INDEX(CG_POS_Z)] - params_p->wheel_radius;\n      }\n    }\n    if (normals_tex_helper_->checkTextureUse(0))\n    {\n      wheel_normal_world = normals_tex_helper_->queryTextureAtWorldPose(0, wheel_positions_world);\n      if (!isfinite(wheel_normal_world.x) || !isfinite(wheel_normal_world.y) || !isfinite(wheel_normal_world.z))\n      {\n        wheel_normal_world = make_float4(0.0, 0.0, 1.0, 0.0);\n      }\n    }\n\n    // Calculate wheel heights, velocities, and forces\n    wheel_pos_z =\n        state[S_INDEX(CG_POS_Z)] + roll * wheel_positions_cg.y - pitch * wheel_positions_cg.x - params_p->wheel_radius;\n    wheel_vel_z = state[S_INDEX(CG_VEL_I_Z)] + state[S_INDEX(ROLL_RATE)] * wheel_positions_cg.y -\n                  state[S_INDEX(PITCH_RATE)] * wheel_positions_cg.x;\n\n    // h_dot = - V_I * N_I\n    h_dot = -(state[S_INDEX(VEL_X)] * cos_wheel_yaw * wheel_normal_world.x +\n              state[S_INDEX(VEL_X)] * sin_wheel_yaw * wheel_normal_world.y);\n\n    float wheel_force = -params_p->spring_k * (wheel_pos_z - wheel_height) - params_p->drag_c * (wheel_vel_z - h_dot);\n    float up_wheel_force = wheel_force;  // + params_p->mass * (9.81f / 4.0f);\n    float fwd_wheel_force =\n        wheel_force / wheel_normal_world.z *\n        (wheel_normal_world.x * cos_wheel_yaw + wheel_normal_world.y * sin_wheel_yaw + wheel_normal_world.z * (-pitch));\n    float side_wheel_force =\n        wheel_force / wheel_normal_world.z *\n        (-wheel_normal_world.x * sin_wheel_yaw + wheel_normal_world.y * cos_wheel_yaw + wheel_normal_world.z * roll);\n    wheel_force_up[i] = up_wheel_force;\n    wheel_force_fwd[i] = 
fabsf(fwd_wheel_force);\n    wheel_force_side[i] = fabsf(side_wheel_force);\n    // output[O_INDEX(WHEEL_FORCE_UP_FL) + i] = up_wheel_force;\n    // output[O_INDEX(WHEEL_FORCE_FWD_FL) + i] = fwd_wheel_force;\n    // output[O_INDEX(WHEEL_FORCE_SIDE_FL) + i] = side_wheel_force;\n#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ > 600\n    // block-specific atomicAdd is only available on compute capabilities 6.x and greater\n    atomicAdd_block(&state_der[S_INDEX(CG_VEL_I_Z)], wheel_force / params_p->mass);\n    atomicAdd_block(&state_der[S_INDEX(ROLL_RATE)], wheel_force * wheel_positions_cg.y / params_p->I_xx);\n    atomicAdd_block(&state_der[S_INDEX(PITCH_RATE)], -wheel_force * wheel_positions_cg.x / params_p->I_yy);\n#elif defined(__CUDA_ARCH__)\n    atomicAdd(&state_der[S_INDEX(CG_VEL_I_Z)], wheel_force / params_p->mass);\n    atomicAdd(&state_der[S_INDEX(ROLL_RATE)], wheel_force * wheel_positions_cg.y / params_p->I_xx);\n    atomicAdd(&state_der[S_INDEX(PITCH_RATE)], -wheel_force * wheel_positions_cg.x / params_p->I_yy);\n#endif\n  }\n  __syncthreads();\n\n  output[O_INDEX(WHEEL_FORCE_UP_MAX)] =\n      fmaxf(wheel_force_up[0], fmaxf(wheel_force_up[1], fmaxf(wheel_force_up[2], wheel_force_up[3])));\n  output[O_INDEX(WHEEL_FORCE_FWD_MAX)] =\n      fmaxf(wheel_force_fwd[0], fmaxf(wheel_force_fwd[1], fmaxf(wheel_force_fwd[2], wheel_force_fwd[3])));\n  output[O_INDEX(WHEEL_FORCE_SIDE_MAX)] =\n      fmaxf(wheel_force_side[0], fmaxf(wheel_force_side[1], fmaxf(wheel_force_side[2], wheel_force_side[3])));\n}\n\nTEMPLATE_TYPE\n__device__ void TEMPLATE_NAME::step(float* state, float* next_state, float* state_der, float* control, float* output,\n                                    float* theta_s, const float t, const float dt)\n{\n  DYN_PARAMS_T* params_p;\n  SharedBlock* sb;\n  // if (GRANDPARENT_CLASS::SHARED_MEM_REQUEST_GRD_BYTES != 0)\n  // {  // Allows us to turn on or off global or shared memory version of params\n  //   params_p = (DYN_PARAMS_T*)theta_s;\n  // }\n  // 
else\n  // {\n  //   params_p = &(this->params_);\n  // }\n  params_p = &(this->params_);\n  const int grd_shift = this->SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float);  // grid size to shift by\n  const int blk_shift = this->SHARED_MEM_REQUEST_BLK_BYTES * (threadIdx.x + blockDim.x * threadIdx.z) /\n                        sizeof(float);                       // blk size to shift by\n  const int sb_shift = sizeof(SharedBlock) / sizeof(float);  // how much to shift inside a block to lstm values\n  if (this->SHARED_MEM_REQUEST_BLK_BYTES != 0)\n  {\n    float* sb_mem = &theta_s[grd_shift];  // does the grid shift\n    sb = (SharedBlock*)(sb_mem + blk_shift);\n  }\n  computeParametricDelayDeriv(state, control, state_der, params_p);\n  computeParametricAccelDeriv(state, control, state_der, dt, params_p);\n  computeLSTMSteering(state, control, state_der, params_p, theta_s, grd_shift, blk_shift, sb_shift);\n  computeSimpleSuspensionStep(state, state_der, output, params_p, theta_s);\n\n  updateState(state, next_state, state_der, dt);\n  computeUncertaintyPropagation(state, control, state_der, next_state, dt, params_p, sb, theta_s);\n  // if (pi == 0)\n  // {\n  //   float roll = state[S_INDEX(ROLL)];\n  //   float pitch = state[S_INDEX(PITCH)];\n  //   RACER::computeStaticSettling<DYN_PARAMS_T::OutputIndex, TwoDTextureHelper<float>>(\n  //       this->tex_helper_, next_state[S_INDEX(YAW)], next_state[S_INDEX(POS_X)], next_state[S_INDEX(POS_Y)], roll,\n  //       pitch, output);\n  //   next_state[S_INDEX(PITCH)] = pitch;\n  //   next_state[S_INDEX(ROLL)] = roll;\n  // }\n  __syncthreads();\n  // if (threadIdx.x == 0 && blockIdx.x == 0 && threadIdx.y == 0)\n  // {\n  //   printf(\"GPU t: %3d, VEL_Z(t + 1): %f, VEL_Z(t): %f, VEl_Z'(t): %f\\n\", t, next_state[S_INDEX(CG_VEL_I_Z)],\n  //   state[S_INDEX(CG_VEL_I_Z)],\n  //       state_der[S_INDEX(CG_VEL_I_Z)]);\n  // }\n  this->setOutputs(state_der, next_state, output);\n}\n\nTEMPLATE_TYPE\n__device__ void 
TEMPLATE_NAME::updateState(float* state, float* next_state, float* state_der, const float dt)\n{\n  int i;\n  int tdy, step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(tdy, step);\n  // Add the state derivative time dt to the current state.\n  for (i = tdy; i < S_INDEX(STEER_ANGLE_RATE); i += step)\n  {\n    next_state[i] = state[i] + state_der[i] * dt;\n    if (i == S_INDEX(YAW))\n    {\n      next_state[i] = angle_utils::normalizeAngle(next_state[i]);\n    }\n    if (i == S_INDEX(STEER_ANGLE))\n    {\n      next_state[i] = fmaxf(fminf(next_state[i], this->params_.max_steer_angle), -this->params_.max_steer_angle);\n      next_state[S_INDEX(STEER_ANGLE_RATE)] =\n          state[S_INDEX(STEER_ANGLE_RATE)] + state_der[S_INDEX(STEER_ANGLE_RATE)] * dt;\n    }\n    if (i == S_INDEX(BRAKE_STATE))\n    {\n      next_state[i] = fminf(fmaxf(next_state[i], 0.0f), 1.0f);\n    }\n  }\n}\n\nTEMPLATE_TYPE\nvoid TEMPLATE_NAME::updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                                Eigen::Ref<state_array> state_der, const float dt)\n{\n  // Segmented it to ensure that roll and pitch don't get overwritten\n  for (int i = 0; i < S_INDEX(STEER_ANGLE_RATE); i++)\n  {\n    next_state[i] = state[i] + state_der[i] * dt;\n  }\n  next_state(S_INDEX(YAW)) = angle_utils::normalizeAngle(next_state(S_INDEX(YAW)));\n  next_state(S_INDEX(STEER_ANGLE)) =\n      fmaxf(fminf(next_state(S_INDEX(STEER_ANGLE)), this->params_.max_steer_angle), -this->params_.max_steer_angle);\n  next_state(S_INDEX(STEER_ANGLE_RATE)) = state(S_INDEX(STEER_ANGLE_RATE)) + state_der(S_INDEX(STEER_ANGLE_RATE)) * dt;\n  next_state(S_INDEX(BRAKE_STATE)) =\n      fminf(fmaxf(next_state(S_INDEX(BRAKE_STATE)), 0.0f), -this->control_rngs_[C_INDEX(THROTTLE_BRAKE)].x);\n}\n\nTEMPLATE_TYPE\n__host__ __device__ void TEMPLATE_NAME::setOutputs(const float* state_der, const float* next_state, float* output)\n{\n  // Setup output\n\n  int step, 
pi;\n  mp1::getParallel1DIndex<mp1::Parallel1Dir::THREAD_Y>(pi, step);\n  for (int i = pi; i < this->OUTPUT_DIM; i += step)\n  {\n    switch (i)\n    {\n      case O_INDEX(BASELINK_VEL_B_X):\n        output[i] = next_state[S_INDEX(VEL_X)];\n        break;\n      case O_INDEX(BASELINK_VEL_B_Y):\n        output[i] = 0.0f;\n        break;\n      // case O_INDEX(BASELINK_VEL_B_Z):\n      //   output[i] = 0.0f;\n      //   break;\n      case O_INDEX(BASELINK_POS_I_X):\n        output[i] = next_state[S_INDEX(POS_X)];\n        break;\n      case O_INDEX(BASELINK_POS_I_Y):\n        output[i] = next_state[S_INDEX(POS_Y)];\n        break;\n      case O_INDEX(BASELINK_POS_I_Z):\n        output[i] = next_state[S_INDEX(CG_POS_Z)] - next_state[S_INDEX(PITCH)] * (-this->params_.c_g.x);\n        break;\n      case O_INDEX(PITCH):\n        output[i] = next_state[S_INDEX(PITCH)];\n        break;\n      case O_INDEX(ROLL):\n        output[i] = next_state[S_INDEX(ROLL)];\n        break;\n      case O_INDEX(YAW):\n        output[i] = next_state[S_INDEX(YAW)];\n        break;\n      case O_INDEX(STEER_ANGLE):\n        output[i] = next_state[S_INDEX(STEER_ANGLE)];\n        break;\n      case O_INDEX(STEER_ANGLE_RATE):\n        output[i] = next_state[S_INDEX(STEER_ANGLE_RATE)];\n        break;\n      case O_INDEX(ACCEL_X):\n        output[i] = state_der[S_INDEX(VEL_X)];\n        break;\n      case O_INDEX(ACCEL_Y):\n        output[i] = 0.0f;\n        break;\n      case O_INDEX(OMEGA_Z):\n        output[i] = state_der[S_INDEX(YAW)];\n        break;\n      case O_INDEX(UNCERTAINTY_VEL_X):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_VEL_X)];\n        break;\n      case O_INDEX(UNCERTAINTY_YAW_VEL_X):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_YAW_VEL_X)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X_VEL_X):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_X_VEL_X)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_Y_VEL_X):\n        output[i] = 
next_state[S_INDEX(UNCERTAINTY_POS_Y_VEL_X)];\n        break;\n      case O_INDEX(UNCERTAINTY_YAW):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_YAW)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X_YAW):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_X_YAW)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_Y_YAW):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_Y_YAW)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_X)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_X_Y):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_X_Y)];\n        break;\n      case O_INDEX(UNCERTAINTY_POS_Y):\n        output[i] = next_state[S_INDEX(UNCERTAINTY_POS_Y)];\n        break;\n      case O_INDEX(TOTAL_VELOCITY):\n        output[i] = fabsf(next_state[S_INDEX(VEL_X)]);\n        break;\n    }\n  }\n}\n\nTEMPLATE_TYPE\nTEMPLATE_NAME::state_array TEMPLATE_NAME::stateFromMap(const std::map<std::string, float>& map)\n{\n  std::vector<std::string> needed_keys = { \"VEL_X\",      \"VEL_Z\", \"POS_X\", \"POS_Y\", \"POS_Z\",       \"OMEGA_X\",\n                                           \"OMEGA_Y\",    \"ROLL\",  \"PITCH\", \"YAW\",   \"STEER_ANGLE\", \"STEER_ANGLE_RATE\",\n                                           \"BRAKE_STATE\" };\n  std::vector<std::string> uncertainty_keys = { \"UNCERTAINTY_POS_X\",       \"UNCERTAINTY_POS_Y\",\n                                                \"UNCERTAINTY_YAW\",         \"UNCERTAINTY_VEL_X\",\n                                                \"UNCERTAINTY_POS_X_Y\",     \"UNCERTAINTY_POS_X_YAW\",\n                                                \"UNCERTAINTY_POS_X_VEL_X\", \"UNCERTAINTY_POS_Y_YAW\",\n                                                \"UNCERTAINTY_POS_Y_VEL_X\", \"UNCERTAINTY_YAW_VEL_X\" };\n  const bool use_uncertainty = false;\n  if (use_uncertainty)\n  {\n    needed_keys.insert(needed_keys.end(), uncertainty_keys.begin(), 
uncertainty_keys.end());\n  }\n\n  bool missing_key = false;\n  state_array s = state_array::Zero();\n  for (const auto& key : needed_keys)\n  {\n    if (map.find(key) == map.end())\n    {  // Print out all missing keys\n      std::cout << \"Could not find key \" << key << \" for elevation with simple suspension dynamics.\" << std::endl;\n      missing_key = true;\n    }\n  }\n  if (missing_key)\n  {\n    return state_array::Constant(NAN);\n  }\n\n  s(S_INDEX(POS_X)) = map.at(\"POS_X\");\n  s(S_INDEX(POS_Y)) = map.at(\"POS_Y\");\n  s(S_INDEX(CG_POS_Z)) = map.at(\"POS_Z\");\n  s(S_INDEX(VEL_X)) = map.at(\"VEL_X\");\n  float bl_v_I_z = map.at(\"VEL_Z\") * cosf(map.at(\"PITCH\")) - map.at(\"VEL_X\") * sinf(map.at(\"PITCH\"));\n  s(S_INDEX(CG_VEL_I_Z)) = bl_v_I_z - map.at(\"OMEGA_Y\") * this->params_.c_g.x;\n  s(S_INDEX(STEER_ANGLE)) = map.at(\"STEER_ANGLE\");\n  s(S_INDEX(STEER_ANGLE_RATE)) = map.at(\"STEER_ANGLE_RATE\");\n  s(S_INDEX(ROLL)) = map.at(\"ROLL\");\n  s(S_INDEX(PITCH)) = map.at(\"PITCH\");\n  s(S_INDEX(YAW)) = map.at(\"YAW\");\n  // Set z position to cg's z position in world frame\n  float3 rotation = make_float3(s(S_INDEX(ROLL)), s(S_INDEX(PITCH)), s(S_INDEX(YAW)));\n  float3 world_pose = make_float3(s(S_INDEX(POS_X)), s(S_INDEX(POS_Y)), s(S_INDEX(CG_POS_Z)));\n  float3 cg_in_world_frame;\n  mppi::math::bodyOffsetToWorldPoseEuler(this->params_.c_g, world_pose, rotation, cg_in_world_frame);\n  s(S_INDEX(CG_POS_Z)) = cg_in_world_frame.z;\n  s(S_INDEX(ROLL_RATE)) = map.at(\"OMEGA_X\");\n  s(S_INDEX(PITCH_RATE)) = map.at(\"OMEGA_Y\");\n  s(S_INDEX(BRAKE_STATE)) = map.at(\"BRAKE_STATE\");\n\n  if (use_uncertainty)\n  {\n    s(S_INDEX(UNCERTAINTY_POS_X)) = map.at(\"UNCERTAINTY_POS_X\");\n    s(S_INDEX(UNCERTAINTY_POS_Y)) = map.at(\"UNCERTAINTY_POS_Y\");\n    s(S_INDEX(UNCERTAINTY_YAW)) = map.at(\"UNCERTAINTY_YAW\");\n    s(S_INDEX(UNCERTAINTY_VEL_X)) = map.at(\"UNCERTAINTY_VEL_X\");\n    s(S_INDEX(UNCERTAINTY_POS_X_Y)) = map.at(\"UNCERTAINTY_POS_X_Y\");\n    
s(S_INDEX(UNCERTAINTY_POS_X_YAW)) = map.at(\"UNCERTAINTY_POS_X_YAW\");\n    s(S_INDEX(UNCERTAINTY_POS_X_VEL_X)) = map.at(\"UNCERTAINTY_POS_X_VEL_X\");\n    s(S_INDEX(UNCERTAINTY_POS_Y_YAW)) = map.at(\"UNCERTAINTY_POS_Y_YAW\");\n    s(S_INDEX(UNCERTAINTY_POS_Y_VEL_X)) = map.at(\"UNCERTAINTY_POS_Y_VEL_X\");\n    s(S_INDEX(UNCERTAINTY_YAW_VEL_X)) = map.at(\"UNCERTAINTY_YAW_VEL_X\");\n  }\n  float eps = 1e-6f;\n  if (s(S_INDEX(UNCERTAINTY_POS_X)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_POS_X)) = eps;\n  }\n  if (s(S_INDEX(UNCERTAINTY_POS_Y)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_POS_Y)) = eps;\n  }\n  if (s(S_INDEX(UNCERTAINTY_YAW)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_YAW)) = eps;\n  }\n  if (s(S_INDEX(UNCERTAINTY_VEL_X)) < eps)\n  {\n    s(S_INDEX(UNCERTAINTY_VEL_X)) = eps;\n  }\n  return s;\n}\n#undef TEMPLATE_NAME\n#undef TEMPLATE_TYPE\n"
  },
  {
    "path": "include/mppi/dynamics/racer_dubins/racer_dubins_elevation_suspension_lstm.cuh",
    "content": "//\n// Created by Bogdan on 02/21/2024\n//\n#pragma once\n\n#include \"racer_dubins_elevation_lstm_steering.cuh\"\n\n#ifndef W_INDEX\n#define W_IND_CLASS(CLASS, enum_val) E_INDEX(CLASS::WheelIndex, enum_val)\n#define W_INDEX(enum_val) W_IND_CLASS(PARENT_CLASS::DYN_PARAMS_T, enum_val)\n#define W_INDEX_NEWLY_CREATED true\n#endif\n\nstruct RacerDubinsElevationSuspensionParams : public RacerDubinsElevationParams\n{\n  enum class WheelIndex : int\n  {\n    FL = 0,\n    FR,\n    BL,\n    BR,\n    NUM_WHEELS,\n  };\n\n  enum class StateIndex : int\n  {\n    VEL_X = 0,\n    YAW,\n    POS_X,\n    POS_Y,\n    STEER_ANGLE,\n    BRAKE_STATE,\n    ROLL,\n    PITCH,\n    CG_POS_Z,\n    CG_VEL_I_Z,\n    ROLL_RATE,\n    PITCH_RATE,\n    STEER_ANGLE_RATE,\n    UNCERTAINTY_POS_X,\n    UNCERTAINTY_POS_Y,\n    UNCERTAINTY_YAW,\n    UNCERTAINTY_VEL_X,\n    UNCERTAINTY_POS_X_Y,\n    UNCERTAINTY_POS_X_YAW,\n    UNCERTAINTY_POS_X_VEL_X,\n    UNCERTAINTY_POS_Y_YAW,\n    UNCERTAINTY_POS_Y_VEL_X,\n    UNCERTAINTY_YAW_VEL_X,\n    FILLER_1,\n    NUM_STATES\n  };\n\n  float spring_k = 14000.0f;                              // [N / m]\n  float drag_c = 1000.0f;                                 // [N * s / m]\n  float mass = 1447.0f;                                   // [kg]\n  float I_xx = 1.0f / 12 * mass * 2 * SQ(1.5f);           // [kg * m^2]\n  float I_yy = 1.0f / 12 * mass * (SQ(1.5f) + SQ(3.0f));  // [kg * m^2]\n  float wheel_radius = 0.32f;                             // [m]\n\n  // Cost force threshold on the order of 3000 N\n  // TODO Figure out Center of Gravity\n  float3 c_g = make_float3(2.981f * 0.5f, 0.0f, 0.0f);\n};\n\ntemplate <class CLASS_T, class PARAMS_T = RacerDubinsElevationSuspensionParams>\nclass RacerDubinsElevationSuspensionImpl : public RacerDubinsElevationLSTMSteeringImpl<CLASS_T, PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = RacerDubinsElevationLSTMSteeringImpl<CLASS_T, PARAMS_T>;\n  using GRANDPARENT_CLASS = typename PARENT_CLASS::PARENT_CLASS;\n  
using DYN_PARAMS_T = typename PARENT_CLASS::DYN_PARAMS_T;\n  using SharedBlock = typename PARENT_CLASS::SharedBlock;\n  using state_array = typename PARENT_CLASS::state_array;\n  using control_array = typename PARENT_CLASS::control_array;\n  using output_array = typename PARENT_CLASS::output_array;\n  using buffer_trajectory = typename PARENT_CLASS::buffer_trajectory;\n  using PARENT_CLASS::computeParametricAccelDeriv;\n  using PARENT_CLASS::computeParametricDelayDeriv;\n  using PARENT_CLASS::computeUncertaintyPropagation;\n\n  RacerDubinsElevationSuspensionImpl(LSTMLSTMConfig config, cudaStream_t stream)\n    : RacerDubinsElevationSuspensionImpl(config.init_config.input_dim, config.init_config.hidden_dim,\n                                         config.init_config.output_layers, config.pred_config.input_dim,\n                                         config.pred_config.hidden_dim, config.pred_config.output_layers,\n                                         config.init_len, stream)\n  {\n  }\n\n  RacerDubinsElevationSuspensionImpl(int init_input_dim, int init_hidden_dim, std::vector<int>& init_output_layers,\n                                     int input_dim, int hidden_dim, std::vector<int>& output_layers, int init_len,\n                                     cudaStream_t stream);\n  RacerDubinsElevationSuspensionImpl(std::string path, cudaStream_t stream = nullptr);\n\n  std::string getDynamicsModelName() const override\n  {\n    return \"RACER Dubins LSTM Steering and Suspension Model\";\n  }\n\n  void bindToStream(cudaStream_t stream);\n\n  void paramsToDevice();\n\n  void GPUSetup();\n\n  void freeCudaMem();\n\n  void computeSimpleSuspensionStep(Eigen::Ref<state_array> state, Eigen::Ref<state_array> state_der,\n                                   Eigen::Ref<output_array> output);\n\n  void step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state, Eigen::Ref<state_array> state_der,\n            const Eigen::Ref<const control_array>& control, 
Eigen::Ref<output_array> output, const float t,\n            const float dt);\n\n  __device__ void computeSimpleSuspensionStep(float* state, float* state_der, float* output, DYN_PARAMS_T* params_p,\n                                              float* theta_s);\n\n  __device__ inline void step(float* state, float* next_state, float* state_der, float* control, float* output,\n                              float* theta_s, const float t, const float dt);\n\n  __device__ void updateState(float* state, float* next_state, float* state_der, const float dt);\n\n  void updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                   Eigen::Ref<state_array> state_der, const float dt);\n\n  __host__ __device__ void setOutputs(const float* state_der, const float* next_state, float* output);\n\n  TwoDTextureHelper<float4>* getTextureHelperNormals()\n  {\n    return normals_tex_helper_;\n  }\n\n  void updateRotation(std::array<float3, 3>& rotation)\n  {\n    this->tex_helper_->updateRotation(0, rotation);\n    this->normals_tex_helper_->updateRotation(0, rotation);\n  }\n\n  state_array stateFromMap(const std::map<std::string, float>& map);\n\nprotected:\n  TwoDTextureHelper<float4>* normals_tex_helper_ = nullptr;\n  RacerDubinsElevationSuspensionImpl(cudaStream_t stream = nullptr) : PARENT_CLASS(stream)\n  {\n    normals_tex_helper_ = new TwoDTextureHelper<float4>(1, stream);\n  }\n};\n\nclass RacerDubinsElevationSuspension : public RacerDubinsElevationSuspensionImpl<RacerDubinsElevationSuspension>\n{\npublic:\n  using PARENT_CLASS = RacerDubinsElevationSuspensionImpl<RacerDubinsElevationSuspension>;\n\n  RacerDubinsElevationSuspension(int init_input_dim, int init_hidden_dim, std::vector<int>& init_output_layers,\n                                 int input_dim, int hidden_dim, std::vector<int>& output_layers, int init_len,\n                                 cudaStream_t stream = nullptr)\n    : PARENT_CLASS(init_input_dim, 
init_hidden_dim, init_output_layers, input_dim, hidden_dim, output_layers, init_len,\n                   stream)\n  {\n  }\n  RacerDubinsElevationSuspension(std::string path, cudaStream_t stream = 0) : PARENT_CLASS(path, stream)\n  {\n  }\n\nprotected:\n  // protected constructor so you cannot init without a network setup properly\n  RacerDubinsElevationSuspension(cudaStream_t stream = 0) : PARENT_CLASS(stream = 0)\n  {\n  }\n};\n\n#ifdef __CUDACC__\n#include \"racer_dubins_elevation_suspension_lstm.cu\"\n#endif\n\n#if W_INDEX_NEWLY_CREATED\n#undef W_IND_CLASS\n#undef W_IND\n#undef W_INDEX\n#endif\n"
  },
  {
    "path": "include/mppi/dynamics/racer_suspension/racer_suspension.cu",
    "content": "#include <mppi/dynamics/racer_suspension/racer_suspension.cuh>\n#include <mppi/utils/eigen_type_conversions.h>\n#include <mppi/utils/math_utils.h>\n\nvoid RacerSuspension::GPUSetup()\n{\n  auto* derived = static_cast<PARENT_CLASS*>(this);\n  tex_helper_->GPUSetup();\n  derived->GPUSetup();\n}\n\nvoid RacerSuspension::freeCudaMem()\n{\n  tex_helper_->freeCudaMem();\n}\n\nvoid RacerSuspension::paramsToDevice()\n{\n  if (this->GPUMemStatus_)\n  {\n    // does all the internal texture updates\n    tex_helper_->copyToDevice();\n    // makes sure that the device ptr sees the correct texture object\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->model_d_->tex_helper_), &(tex_helper_->ptr_d_),\n                                 sizeof(TwoDTextureHelper<float>*), cudaMemcpyHostToDevice, this->stream_));\n  }\n  PARENT_CLASS::paramsToDevice();\n}\n\n// combined computeDynamics & updateState\nvoid RacerSuspension::step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state,\n                           Eigen::Ref<state_array> state_der, const Eigen::Ref<const control_array> control,\n                           Eigen::Ref<output_array> output, const float t, const float dt)\n{\n  Eigen::Matrix3f omegaJac;\n  computeStateDeriv(state, control, state_der, output, &omegaJac);\n  // approximate implicit euler for angular rate states\n  Eigen::Vector3f deltaOmega =\n      (Eigen::Matrix3f::Identity() - dt * omegaJac).inverse() * dt * state_der.segment<3>(S_INDEX(OMEGA_B_X));\n  state_array delta_x = state_der * dt;\n  delta_x.segment<3>(S_INDEX(OMEGA_B_X)) = deltaOmega;\n  next_state = state + delta_x;\n  float q_norm = next_state.segment<4>(S_INDEX(ATTITUDE_QW)).norm();\n  next_state.segment<4>(S_INDEX(ATTITUDE_QW)) /= q_norm;\n}\n\nvoid RacerSuspension::updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                                  Eigen::Ref<state_array> state_der, const float dt)\n{\n  next_state = state + 
state_der * dt;\n  float q_norm = next_state.segment<4>(S_INDEX(ATTITUDE_QW)).norm();\n  next_state.segment<4>(S_INDEX(ATTITUDE_QW)) /= q_norm;\n}\n\n__device__ void RacerSuspension::updateState(float* state, float* next_state, float* state_der, const float dt)\n{\n  unsigned int i;\n  unsigned int tdy = threadIdx.y;\n  // Add the state derivative time dt to the current state.\n  // printf(\"updateState thread %d, %d = %f, %f\\n\", threadIdx.x, threadIdx.y, state[0], state_der[0]);\n  for (i = tdy; i < PARENT_CLASS::STATE_DIM; i += blockDim.y)\n  {\n    next_state[i] = state[i] + state_der[i] * dt;\n  }\n  __syncthreads();\n  if (tdy == 0)\n  {\n    float norm = sqrtf(powf(next_state[S_INDEX(ATTITUDE_QW)], 2) + powf(next_state[S_INDEX(ATTITUDE_QX)], 2) +\n                       powf(next_state[S_INDEX(ATTITUDE_QY)], 2) + powf(next_state[S_INDEX(ATTITUDE_QZ)], 2));\n    next_state[S_INDEX(ATTITUDE_QW)] /= norm;\n    next_state[S_INDEX(ATTITUDE_QX)] /= norm;\n    next_state[S_INDEX(ATTITUDE_QY)] /= norm;\n    next_state[S_INDEX(ATTITUDE_QZ)] /= norm;\n  }\n}\n\n__device__ __host__ static float stribeck_friction(float v, float mu_s, float v_slip, float& partial_mu_partial_v)\n{\n  float mu = v / v_slip * mu_s;\n  partial_mu_partial_v = 0;\n  if (mu > mu_s)\n  {\n    return mu_s;\n  }\n  if (mu < -mu_s)\n  {\n    return -mu_s;\n  }\n  partial_mu_partial_v = mu_s / v_slip;\n  return mu;\n}\n\n__device__ __host__ void RacerSuspension::computeStateDeriv(const Eigen::Ref<const state_array>& state,\n                                                            const Eigen::Ref<const control_array>& control,\n                                                            Eigen::Ref<state_array> state_der,\n                                                            Eigen::Ref<output_array> output,\n                                                            Eigen::Matrix3f* omegaJacobian)\n{\n  Eigen::Vector3f p_I = state.segment<3>(S_INDEX(P_I_X));\n  Eigen::Vector3f v_I = 
state.segment<3>(S_INDEX(V_I_X));\n  Eigen::Vector3f omega = state.segment<3>(S_INDEX(OMEGA_B_X));\n  Eigen::Quaternionf q(state[S_INDEX(ATTITUDE_QW)], state[S_INDEX(ATTITUDE_QX)], state[S_INDEX(ATTITUDE_QY)],\n                       state[S_INDEX(ATTITUDE_QZ)]);\n  Eigen::Matrix3f R = q.toRotationMatrix();\n  float tan_delta = tan(state[S_INDEX(STEER_ANGLE)]);\n  Eigen::Matrix3f Rdot = R * mppi::math::skewSymmetricMatrix(omega);\n\n  // linear engine model\n  float vel_x = (R.transpose() * v_I)[0];\n  float throttle = max(0.0, control[C_INDEX(THROTTLE_BRAKE)]);\n  float brake = max(0.0, -control[C_INDEX(THROTTLE_BRAKE)]);\n  float acc = params_.c_t * throttle - copysign(params_.c_b * brake, vel_x) - params_.c_v * vel_x + params_.c_0;\n  float propulsion_force = params_.mass * acc;\n\n  Eigen::Vector3f f_B = Eigen::Vector3f::Zero();\n  Eigen::Vector3f tau_B = Eigen::Vector3f::Zero();\n  Eigen::Matrix3f tau_B_jac = Eigen::Matrix3f::Zero();\n  Eigen::Vector3f f_r_B[4];\n  // for each wheel\n  for (int i = 0; i < 4; i++)\n  {\n    // compute suspension from elevation map\n    Eigen::Vector3f p_w_nom_B_i =\n        cudaToEigen(params_.wheel_pos_wrt_base_link[i]) - cudaToEigen(params_.cg_pos_wrt_base_link);\n    Eigen::Vector3f p_w_nom_I_i = p_I + (R * p_w_nom_B_i).eval();\n    float h_i = 0;\n    Eigen::Vector3f n_I_i(0, 0, 1);\n    // TODO: Enable elevation map querying\n    // if (tex_helper_->checkTextureUse(TEXTURE_ELEVATION_MAP))\n    // {\n    //   float4 wheel_elev = tex_helper_->queryTextureAtWorldPose(TEXTURE_ELEVATION_MAP, EigenToCuda(p_w_nom_I_i));\n    //   h_i = wheel_elev.w;\n    //   // std::cout << \"h_i \" << h_i << std::endl;\n    //   n_I_i = Eigen::Vector3f(wheel_elev.x, wheel_elev.y, wheel_elev.z);\n    // }\n    Eigen::Vector3f p_c_I_i(p_w_nom_I_i[0], p_w_nom_I_i[1], h_i);\n    float l_i = p_w_nom_I_i[2] - p_c_I_i[2];\n    Eigen::Vector3f p_dot_w_nom_I_i = v_I + (Rdot * p_w_nom_B_i).eval();\n    Eigen::Matrix3f p_dot_w_nom_I_i_Jac = R * 
Eigen::Matrix3f::Identity().colwise().cross(p_w_nom_B_i);\n    float h_dot_i = -n_I_i[0] / n_I_i[2] * p_dot_w_nom_I_i[0] - n_I_i[1] / n_I_i[2] * p_dot_w_nom_I_i[1];\n    Eigen::RowVector3f h_dot_i_Jac = (-n_I_i[0] / n_I_i[2] * p_dot_w_nom_I_i_Jac.row(0)).eval() -\n                                     (n_I_i[1] / n_I_i[2] * p_dot_w_nom_I_i_Jac.row(1)).eval();\n    float l_dot_i = p_dot_w_nom_I_i[2] - h_dot_i;\n    Eigen::RowVector3f l_dot_i_Jac = p_dot_w_nom_I_i_Jac.row(2) - h_dot_i_Jac;\n\n    float f_k_i = -params_.k_s[i] * (l_i - params_.l_0[i]) - params_.c_s[i] * l_dot_i;\n    Eigen::RowVector3f f_k_i_Jac = -params_.c_s[i] * l_dot_i_Jac;\n    if (f_k_i < 0)\n    {\n      f_k_i = 0;\n      f_k_i_Jac = Eigen::RowVector3f::Zero();\n    }\n\n    // contact frame\n    Eigen::Vector3f p_dot_c_I_i(p_dot_w_nom_I_i[0], p_dot_w_nom_I_i[1], h_dot_i);\n    Eigen::Matrix3f p_dot_c_I_i_Jac;\n    p_dot_c_I_i_Jac.row(0) = p_dot_w_nom_I_i_Jac.row(0);\n    p_dot_c_I_i_Jac.row(1) = p_dot_w_nom_I_i_Jac.row(1);\n    p_dot_c_I_i_Jac.row(2) = h_dot_i_Jac;\n    float delta_i;\n    if (i == RacerSuspensionParams::WHEEL_FRONT_LEFT)\n    {\n      delta_i = atan(params_.wheel_base * tan_delta / (params_.wheel_base - tan_delta * params_.width / 2));\n    }\n    else if (i == RacerSuspensionParams::WHEEL_FRONT_RIGHT)\n    {\n      delta_i = atan(params_.wheel_base * tan_delta / (params_.wheel_base + tan_delta * params_.width / 2));\n    }\n    else\n    {  // rear wheels\n      delta_i = 0;\n    }\n\n    Eigen::Vector3f n_B_i = R.transpose() * n_I_i;\n    Eigen::Vector3f wheel_dir_B_i(cos(delta_i), sin(delta_i), 0);\n    Eigen::Vector3f s_B_i = n_B_i.cross(wheel_dir_B_i).normalized();\n    Eigen::Vector3f t_B_i = s_B_i.cross(n_B_i);\n    Eigen::Matrix3f R_C_i_to_B;\n    R_C_i_to_B.col(0) = t_B_i;\n    R_C_i_to_B.col(1) = s_B_i;\n    R_C_i_to_B.col(2) = n_B_i;\n\n    // contact velocity\n    Eigen::Vector3f p_dot_c_B_i = R.transpose() * p_dot_c_I_i;\n    Eigen::Matrix3f p_dot_c_B_i_Jac = 
R.transpose() * p_dot_c_I_i_Jac;\n    float v_w_t_i = t_B_i.dot(p_dot_c_B_i);\n    float v_w_s_i = s_B_i.dot(p_dot_c_B_i);\n    Eigen::RowVector3f v_w_s_i_Jac = s_B_i.transpose() * p_dot_c_B_i_Jac;\n\n    // compute contact forces\n    float f_n_i = f_k_i;\n    float partial_mu_s_partial_v;\n    float mu_s = stribeck_friction(v_w_s_i, params_.mu, params_.v_slip, partial_mu_s_partial_v);\n    float f_s_i = -mu_s * f_n_i;\n    float f_t_i = max(-params_.mu * f_n_i, min(propulsion_force / 4, params_.mu * f_n_i));\n    Eigen::RowVector3f f_n_i_Jac = f_k_i_Jac;\n    Eigen::RowVector3f f_t_i_Jac = Eigen::RowVector3f::Zero();\n    if (propulsion_force / 4 > params_.mu * f_n_i)\n    {\n      f_t_i_Jac = params_.mu * f_n_i_Jac;\n    }\n    else if (propulsion_force / 4 < -params_.mu * f_n_i)\n    {\n      f_t_i_Jac = -params_.mu * f_n_i_Jac;\n    }\n    Eigen::RowVector3f f_s_i_Jac = (-f_n_i * partial_mu_s_partial_v * v_w_s_i_Jac).eval() - (mu_s * f_n_i_Jac).eval();\n\n    // contact force & location\n    Eigen::Vector3f f_r_C_i(f_t_i, f_s_i, f_n_i);\n    Eigen::Matrix3f f_r_C_i_Jac;\n    f_r_C_i_Jac.row(0) = f_t_i_Jac;\n    f_r_C_i_Jac.row(1) = f_s_i_Jac;\n    f_r_C_i_Jac.row(2) = f_n_i_Jac;\n    f_r_B[i] = R_C_i_to_B * f_r_C_i;\n    Eigen::Matrix3f f_r_B_i_Jac = R_C_i_to_B = f_r_C_i_Jac;\n    Eigen::Vector3f p_c_B_i = R.transpose() * (p_c_I_i - p_I).eval();\n\n    // accumulate forces & moments\n    f_B += f_r_B[i];\n    tau_B += (p_c_B_i.cross(f_r_B[i])).eval();\n    tau_B_jac += -f_r_B_i_Jac.colwise().cross(p_c_B_i);\n\n    if (output.data() != nullptr)\n    {\n      output[O_INDEX(WHEEL_POS_I_FL_X) + i * 2] = p_w_nom_I_i[0];\n      output[O_INDEX(WHEEL_POS_I_FL_Y) + i * 2] = p_w_nom_I_i[1];\n      const float force_magn = sqrtf(powf(f_r_B[i][0], 2.0f) + powf(f_r_B[i][1], 2.0f) + powf(f_r_B[i][2], 2.0f));\n      output[O_INDEX(WHEEL_FORCE_B_FL) + i] = force_magn;\n    }\n  }\n\n  Eigen::Vector3f g(0, 0, params_.gravity);  // TODO gravity is negative to match dubins 
model\n\n  state_der.segment<3>(S_INDEX(P_I_X)) = v_I;\n  state_der.segment<3>(S_INDEX(V_I_X)) = (1 / params_.mass * R * f_B).eval() + g;\n  Eigen::Quaternionf qdot;\n  qdot.coeffs() = 0.5 * (q * Eigen::Quaternionf(0, omega[0], omega[1], omega[2])).coeffs();\n  state_der[S_INDEX(ATTITUDE_QW)] = qdot.w();\n  state_der[S_INDEX(ATTITUDE_QX)] = qdot.x();\n  state_der[S_INDEX(ATTITUDE_QY)] = qdot.y();\n  state_der[S_INDEX(ATTITUDE_QZ)] = qdot.z();\n  Eigen::Vector3f J_diag(params_.Jxx, params_.Jyy, params_.Jzz);\n  Eigen::Vector3f J_inv_diag(1.0 / params_.Jxx, 1.0 / params_.Jyy, 1.0 / params_.Jzz);\n  state_der.segment<3>(S_INDEX(OMEGA_B_X)) = J_inv_diag.cwiseProduct(J_diag.cwiseProduct(omega).cross(omega) + tau_B);\n  if (omegaJacobian)\n  {\n    Eigen::Matrix3f J = J_diag.asDiagonal();\n    Eigen::Matrix3f Jwxw_jac =\n        J.colwise().cross(omega) - Eigen::Matrix3f::Identity().colwise().cross(J_diag.cwiseProduct(omega));\n    *omegaJacobian = J_inv_diag.asDiagonal() * (Jwxw_jac + tau_B_jac).eval();\n  }\n\n  // Steering actuator model\n  float steer = control[C_INDEX(STEER_CMD)] / params_.steer_command_angle_scale;\n  state_der[S_INDEX(STEER_ANGLE)] = params_.steering_constant * (steer - state[S_INDEX(STEER_ANGLE)]);\n\n  if (output.data() != nullptr)\n  {\n    Eigen::Vector3f COM_v_B = R.transpose() * v_I;\n    Eigen::Vector3f p_base_link_in_B = -cudaToEigen(params_.cg_pos_wrt_base_link);\n    Eigen::Vector3f base_link_v_B = COM_v_B + omega.cross(p_base_link_in_B);\n    output[O_INDEX(BASELINK_VEL_B_X)] = base_link_v_B[0];\n    output[O_INDEX(BASELINK_VEL_B_X)] = base_link_v_B[1];\n    output[O_INDEX(BASELINK_VEL_B_X)] = base_link_v_B[2];\n    Eigen::Vector3f base_link_p_I = p_I + (R * p_base_link_in_B).eval();\n    output[O_INDEX(BASELINK_POS_I_X)] = base_link_p_I[0];\n    output[O_INDEX(BASELINK_POS_I_Y)] = base_link_p_I[1];\n    output[O_INDEX(BASELINK_POS_I_Z)] = base_link_p_I[2];\n    float roll, pitch, yaw;\n    mppi::math::Quat2EulerNWU(q, roll, pitch, 
yaw);\n    output[O_INDEX(YAW)] = yaw;\n    output[O_INDEX(PITCH)] = pitch;\n    output[O_INDEX(ROLL)] = roll;\n    output[O_INDEX(STEER_ANGLE)] = state[S_INDEX(STEER_ANGLE)];\n    output[O_INDEX(STEER_ANGLE_RATE)] = state_der[S_INDEX(STEER_ANGLE)];\n    // output[O_INDEX(CENTER_POS_I_X)] = output[O_INDEX(BASELINK_POS_I_X)];  // TODO\n    // output[O_INDEX(CENTER_POS_I_Y)] = output[O_INDEX(BASELINK_POS_I_Y)];\n    // output[O_INDEX(CENTER_POS_I_Z)] = output[O_INDEX(BASELINK_POS_I_Z)];\n    output[O_INDEX(ACCEL_X)] = 0;  // TODO: fill in with proper accel_x\n    // #ifdef __CUDA_ARCH__\n    //     if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 &&\n    //         blockIdx.z == 0)\n    //     {\n    //       printf(\"GPU: \");\n    // #else\n    //       printf(\"CPU: \");\n    // #endif\n    //       printf(\"Output: \");\n    //       for (int j = 0; j < DYN_T::OUTPUT_DIM; j++)\n    //       {\n    //         printf(\"%6.2f, \", output[j]);\n    //       }\n    //       printf(\"\\n\");\n    // #ifdef __CUDA_ARCH__\n    //     }\n    // #endif\n  }\n}\n\n__device__ void RacerSuspension::step(float* state, float* next_state, float* state_der, float* control, float* output,\n                                      float* theta_s, const float t, const float dt)\n{\n  computeStateDeriv(state, control, state_der, theta_s, output);\n  __syncthreads();\n  updateState(state, next_state, state_der, dt);\n}\n\n__device__ void RacerSuspension::computeStateDeriv(float* state, float* control, float* state_der, float* theta_s,\n                                                   float* output)\n{\n  Eigen::Map<state_array> state_v(state);\n  Eigen::Map<control_array> control_v(control);\n  Eigen::Map<state_array> state_der_v(state_der);\n  if (output)\n  {\n    Eigen::Map<output_array> output_v(output);\n    // Eigen::Ref<output_array> output_r(output_v);\n    computeStateDeriv(state_v, control_v, state_der_v, output_v);\n  }\n  
else\n  {\n    Eigen::Map<output_array> output_v(nullptr);\n    computeStateDeriv(state_v, control_v, state_der_v, output_v);\n  }\n  // for (int i = 0; i < PARENT_CLASS::STATE_DIM; i++)\n  // {\n  //   state_der[i] = 0;\n  // }\n  // state_der[S_INDEX(V_I_X)] = control[1];\n  // if (output)\n  // {\n  //   for (int i = 0; i < PARENT_CLASS::OUTPUT_DIM; i++)\n  //   {\n  //     output[i] = 0;\n  //   }\n  // }\n}\n\nEigen::Quaternionf RacerSuspension::attitudeFromState(const Eigen::Ref<const state_array>& state)\n{\n  return Eigen::Quaternionf(state[S_INDEX(ATTITUDE_QW)], state[S_INDEX(ATTITUDE_QX)], state[S_INDEX(ATTITUDE_QY)],\n                            state[S_INDEX(ATTITUDE_QZ)]);\n}\n\nEigen::Vector3f RacerSuspension::positionFromState(const Eigen::Ref<const state_array>& state)\n{\n  Eigen::Vector3f p_COM = state.segment<3>(S_INDEX(P_I_X));\n  Eigen::Quaternionf q = attitudeFromState(state);\n  return p_COM - q * cudaToEigen(params_.cg_pos_wrt_base_link);\n}\n\nEigen::Vector3f RacerSuspension::velocityFromState(const Eigen::Ref<const state_array>& state)\n{\n  Eigen::Vector3f COM_v_I = state.segment<3>(S_INDEX(V_I_X));\n  Eigen::Quaternionf q_B_to_I = attitudeFromState(state);\n  Eigen::Vector3f COM_v_B = q_B_to_I.conjugate() * COM_v_I;\n  Eigen::Vector3f omega = state.segment<3>(S_INDEX(OMEGA_B_X));\n  Eigen::Vector3f p_base_link_in_B = -cudaToEigen(params_.cg_pos_wrt_base_link);\n  Eigen::Vector3f base_link_v_B = COM_v_B + omega.cross(p_base_link_in_B);\n  return base_link_v_B;\n}\n\nEigen::Vector3f RacerSuspension::angularRateFromState(const Eigen::Ref<const state_array>& state)\n{\n  return state.segment<3>(S_INDEX(OMEGA_B_X));\n}\n\nRacerSuspension::state_array RacerSuspension::stateFromOdometry(const Eigen::Quaternionf& q_B_to_I,\n                                                                const Eigen::Vector3f& pos_base_link_I,\n                                                                const Eigen::Vector3f& vel_base_link_B,\n                
                                                const Eigen::Vector3f& omega_B)\n{\n  state_array s;\n  s.setZero();\n  s[S_INDEX(ATTITUDE_QW)] = q_B_to_I.w();\n  s[S_INDEX(ATTITUDE_QX)] = q_B_to_I.x();\n  s[S_INDEX(ATTITUDE_QY)] = q_B_to_I.y();\n  s[S_INDEX(ATTITUDE_QZ)] = q_B_to_I.z();\n  s.segment<3>(S_INDEX(OMEGA_B_X)) = omega_B;\n  Eigen::Vector3f p_COM_wrt_base_link = cudaToEigen(params_.cg_pos_wrt_base_link);\n  Eigen::Vector3f p_I = pos_base_link_I + q_B_to_I * p_COM_wrt_base_link;\n  s.segment<3>(S_INDEX(P_I_X)) = p_I;\n  Eigen::Vector3f COM_v_B = vel_base_link_B + omega_B.cross(p_COM_wrt_base_link);\n  Eigen::Vector3f COM_v_I = q_B_to_I * COM_v_B;\n  s.segment<3>(S_INDEX(V_I_X)) = COM_v_I;\n  return s;\n}\n\nvoid RacerSuspension::enforceLeash(const Eigen::Ref<const state_array>& state_true,\n                                   const Eigen::Ref<const state_array>& state_nominal,\n                                   const Eigen::Ref<const state_array>& leash_values,\n                                   Eigen::Ref<state_array> state_output)\n{\n  // update state_output for leash, need to handle x and y positions specially, convert to body frame and leash in body\n  // frame. 
transform x and y into body frame\n  float dx = state_nominal[S_INDEX(P_I_X)] - state_true[S_INDEX(P_I_X)];\n  float dy = state_nominal[S_INDEX(P_I_Y)] - state_true[S_INDEX(P_I_Y)];\n  float roll, pitch, yaw;\n  Eigen::Quaternionf q(state_true[S_INDEX(ATTITUDE_QW)], state_true[S_INDEX(ATTITUDE_QX)],\n                       state_true[S_INDEX(ATTITUDE_QY)], state_true[S_INDEX(ATTITUDE_QZ)]);\n  mppi::math::Quat2EulerNWU(q, roll, pitch, yaw);\n  float dx_body = dx * cos(yaw) + dy * sin(yaw);\n  float dy_body = -dx * sin(yaw) + dy * cos(yaw);\n\n  // determine leash in body frame\n  float x_leash = leash_values[S_INDEX(P_I_X)];\n  float y_leash = leash_values[S_INDEX(P_I_Y)];\n  dx_body = fminf(fmaxf(dx_body, -x_leash), x_leash);\n  dy_body = fminf(fmaxf(dy_body, -y_leash), y_leash);\n\n  // transform back to map frame\n  dx = dx_body * cos(yaw) + -dy_body * sin(yaw);\n  dy = dx_body * sin(yaw) + dy_body * cos(yaw);\n\n  // apply leash\n  state_output[S_INDEX(P_I_X)] += dx;\n  state_output[S_INDEX(P_I_Y)] += dy;\n\n  // TODO: Figure out leash for quaternion?\n\n  // handle leash for rest of states\n  float diff;\n  for (int i = 0; i < STATE_DIM; i++)\n  {\n    // use body x and y for leash\n    if (i == S_INDEX(P_I_X) || i == S_INDEX(P_I_Y) || i == S_INDEX(ATTITUDE_QW) || i == S_INDEX(ATTITUDE_QX) ||\n        i == S_INDEX(ATTITUDE_QY) || i == S_INDEX(ATTITUDE_QZ))\n    {\n      // handle outside for loop\n      continue;\n    }\n    else\n    {\n      diff = fabsf(state_nominal[i] - state_true[i]);\n    }\n\n    if (leash_values[i] < diff)\n    {\n      float leash_dir = fminf(fmaxf(state_nominal[i] - state_true[i], -leash_values[i]), leash_values[i]);\n      state_output[i] = state_true[i] + leash_dir;\n    }\n    else\n    {\n      state_output[i] = state_nominal[i];\n    }\n  }\n}\n\nRacerSuspension::state_array RacerSuspension::stateFromMap(const std::map<std::string, float>& map)\n{\n  state_array s;\n  // TODO\n  return s;\n}\n"
  },
  {
    "path": "include/mppi/dynamics/racer_suspension/racer_suspension.cuh",
    "content": "#ifndef MPPIGENERIC_RACER_SUSPENSION_CUH\n#define MPPIGENERIC_RACER_SUSPENSION_CUH\n\n#include <mppi/dynamics/dynamics.cuh>\n#include <mppi/utils/texture_helpers/two_d_texture_helper.cuh>\n#include <mppi/utils/angle_utils.cuh>\n\nstruct RacerSuspensionParams : public DynamicsParams\n{\n  enum class StateIndex : int\n  {\n    P_I_X = 0,\n    P_I_Y,\n    P_I_Z,\n    ATTITUDE_QW,\n    ATTITUDE_QX,\n    ATTITUDE_QY,\n    ATTITUDE_QZ,\n    V_I_X,\n    V_I_Y,\n    V_I_Z,\n    OMEGA_B_X,\n    OMEGA_B_Y,\n    OMEGA_B_Z,\n    STEER_ANGLE,\n    NUM_STATES\n  };\n\n  enum class ControlIndex : int\n  {\n    THROTTLE_BRAKE = 0,\n    STEER_CMD,\n    NUM_CONTROLS\n  };\n\n  enum class OutputIndex : int\n  {\n    BASELINK_VEL_B_X = 0,\n    BASELINK_VEL_B_Y,\n    BASELINK_VEL_B_Z,\n    BASELINK_POS_I_X,\n    BASELINK_POS_I_Y,\n    BASELINK_POS_I_Z,\n    YAW,\n    ROLL,\n    PITCH,\n    STEER_ANGLE,\n    STEER_ANGLE_RATE,\n    WHEEL_POS_I_FL_X,\n    WHEEL_POS_I_FL_Y,\n    WHEEL_POS_I_FR_X,\n    WHEEL_POS_I_FR_Y,\n    WHEEL_POS_I_RL_X,\n    WHEEL_POS_I_RL_Y,\n    WHEEL_POS_I_RR_X,\n    WHEEL_POS_I_RR_Y,\n    WHEEL_FORCE_B_FL,\n    WHEEL_FORCE_B_FR,\n    WHEEL_FORCE_B_RL,\n    WHEEL_FORCE_B_RR,\n    ACCEL_X,\n    ACCEL_Y,\n    OMEGA_Z,\n    NUM_OUTPUTS\n  };\n  // suspension model params\n  float wheel_radius = 0.32;\n  float mass = 1447;\n  float wheel_base = 2.981;\n  float width = 1.5;\n  float height = 1.5;\n  float gravity = -9.81;\n  float k_s[4] = { 14000, 14000, 14000, 14000 };\n  float c_s[4] = { 2000, 2000, 2000, 2000 };\n  float l_0[4] = {\n    wheel_radius + mass / 4 * (-gravity) / k_s[0],\n    wheel_radius + mass / 4 * (-gravity) / k_s[1],\n    wheel_radius + mass / 4 * (-gravity) / k_s[2],\n    wheel_radius + mass / 4 * (-gravity) / k_s[3],\n  };\n  float3 cg_pos_wrt_base_link = make_float3(wheel_base / 2, 0, 0.2);\n  // float3 wheel_pos_front_left = make_float3(wheel_base, width/2, 0);\n  // float3 wheel_pos_front_right = make_float3(wheel_base, 
-width/2, 0);\n  // float3 wheel_pos_rear_left = make_float3(0, width/2, 0);\n  // float3 wheel_pos_rear_right = make_float3(0, -width/2, 0);\n  float3 wheel_pos_wrt_base_link[4] = { make_float3(wheel_base, width / 2, 0), make_float3(wheel_base, -width / 2, 0),\n                                        make_float3(0, width / 2, 0), make_float3(0, -width / 2, 0) };\n  float Jxx = 1.0 / 12 * mass * (height * height + width * width);\n  float Jyy = 1.0 / 12 * mass * (height * height + wheel_base * wheel_base);\n  float Jzz = 1.0 / 12 * mass * (wheel_base * wheel_base + width * width);\n  float mu = 0.65;\n  float v_slip = 0.1;\n  static const int WHEEL_FRONT_LEFT = 0;\n  static const int WHEEL_FRONT_RIGHT = 1;\n  static const int WHEEL_REAR_LEFT = 2;\n  static const int WHEEL_REAR_RIGHT = 3;\n\n  // throttle model params\n  float c_t = 3.0;\n  float c_b = 10.0;\n  float c_v = 0.2;\n  float c_0 = 0;\n\n  // steering model params\n  float steering_constant = .6;\n  float steer_command_angle_scale = -2.45;\n  int gear_sign = 1;\n  RacerSuspensionParams()\n  {\n    recalcParams();\n  }\n\n  void __host__ recalcParams()\n  {\n    cg_pos_wrt_base_link = make_float3(wheel_base / 2, 0, 0.2);\n    l_0[0] = wheel_radius + mass / 4 * (-gravity) / k_s[0];\n    l_0[1] = wheel_radius + mass / 4 * (-gravity) / k_s[1];\n    l_0[2] = wheel_radius + mass / 4 * (-gravity) / k_s[2];\n    l_0[3] = wheel_radius + mass / 4 * (-gravity) / k_s[3];\n    wheel_pos_wrt_base_link[0] = make_float3(wheel_base, width / 2, 0);\n    wheel_pos_wrt_base_link[1] = make_float3(wheel_base, -width / 2, 0);\n    wheel_pos_wrt_base_link[2] = make_float3(0, width / 2, 0);\n    wheel_pos_wrt_base_link[3] = make_float3(0, -width / 2, 0);\n    Jxx = 1.0 / 12 * mass * (height * height + width * width);\n    Jyy = 1.0 / 12 * mass * (height * height + wheel_base * wheel_base);\n    Jzz = 1.0 / 12 * mass * (wheel_base * wheel_base + width * width);\n  }\n};\n\nusing namespace MPPI_internal;\n/**\n * state: x, y, z, 
quat [w, x, y, z], vx_I, vy_I, vz_I, omega_x, omega_y, omega_z, shaft angle, shaft angle velocity\n * control: throttle/brake, steering angle command\n * sensors: texture 0 is elevation map (normal x, normal y, normal z, height)\n */\nclass RacerSuspension : public Dynamics<RacerSuspension, RacerSuspensionParams>\n{\npublic:\n  typedef Dynamics<RacerSuspension, RacerSuspensionParams> PARENT_CLASS;\n  typedef typename PARENT_CLASS::state_array state_array;\n  typedef typename PARENT_CLASS::control_array control_array;\n  typedef typename PARENT_CLASS::output_array output_array;\n  typedef typename PARENT_CLASS::dfdx dfdx;\n  typedef typename PARENT_CLASS::dfdu dfdu;\n  using PARENT_CLASS::updateState;  // needed as overloading updateState here hides all parent versions of updateState\n\n  // number of floats for computing the state derivative BLOCK_DIM_X * BLOCK_DIM_Z times\n  static const int SHARED_MEM_REQUEST_BLK_BYTES = 0;\n\n  static const int TEXTURE_ELEVATION_MAP = 0;\n\n  RacerSuspension(cudaStream_t stream = nullptr) : PARENT_CLASS(stream)\n  {\n    tex_helper_ = new TwoDTextureHelper<float>(1, stream);\n  }\n  RacerSuspension(RacerSuspensionParams& params, cudaStream_t stream) : PARENT_CLASS(params, stream)\n  {\n    tex_helper_ = new TwoDTextureHelper<float>(1, stream);\n  }\n\n  std::string getDynamicsModelName() const override\n  {\n    return \"RACER Suspension Model\";\n  }\n\n  ~RacerSuspension()\n  {\n    delete tex_helper_;\n  }\n\n  void GPUSetup();\n\n  void freeCudaMem();\n\n  void paramsToDevice();\n\n  void updateState(const Eigen::Ref<const state_array> state, Eigen::Ref<state_array> next_state,\n                   Eigen::Ref<state_array> state_der, const float dt);\n  void step(Eigen::Ref<state_array> state, Eigen::Ref<state_array> next_state, Eigen::Ref<state_array> state_der,\n            const Eigen::Ref<const control_array> control, Eigen::Ref<output_array> output, const float t,\n            const float dt);\n\n  __device__ __host__ 
void computeStateDeriv(const Eigen::Ref<const state_array>& state,\n                                             const Eigen::Ref<const control_array>& control,\n                                             Eigen::Ref<state_array> state_der, Eigen::Ref<output_array> output,\n                                             Eigen::Matrix3f* omegaJacobian = nullptr);\n\n  __device__ void updateState(float* state, float* next_state, float* state_der, const float dt);\n\n  __device__ void computeStateDeriv(float* state, float* control, float* state_der, float* theta_s,\n                                    float* output = nullptr);\n  void enforceLeash(const Eigen::Ref<const state_array>& state_true, const Eigen::Ref<const state_array>& state_nominal,\n                    const Eigen::Ref<const state_array>& leash_values, Eigen::Ref<state_array> state_output);\n\n  __device__ void step(float* state, float* next_state, float* state_der, float* control, float* output, float* theta_s,\n                       const float t, const float dt);\n\n  TwoDTextureHelper<float>* getTextureHelper()\n  {\n    return tex_helper_;\n  }\n\n  Eigen::Quaternionf attitudeFromState(const Eigen::Ref<const state_array>& state);\n  Eigen::Vector3f positionFromState(const Eigen::Ref<const state_array>& state);\n  Eigen::Vector3f velocityFromState(const Eigen::Ref<const state_array>& state);\n  Eigen::Vector3f angularRateFromState(const Eigen::Ref<const state_array>& state);\n  state_array stateFromOdometry(const Eigen::Quaternionf& q_B_to_I, const Eigen::Vector3f& pos_base_link_I,\n                                const Eigen::Vector3f& vel_base_link_B, const Eigen::Vector3f& omega_B);\n\n  state_array stateFromMap(const std::map<std::string, float>& map) override;\n\nprotected:\n  TwoDTextureHelper<float>* tex_helper_ = nullptr;\n};\n\n#if __CUDACC__\n#include \"racer_suspension.cu\"\n#endif\n\n#endif  // MPPIGENERIC_RACER_SUSPENSION_CUH\n"
  },
  {
    "path": "include/mppi/feedback_controllers/CCM/ccm.h",
    "content": "/*\n * Created on Tue Jun 30 2020 by Bogdan\n */\n#ifndef FEEDBACK_CONTROLLERS_CCM_CCM_H_\n#define FEEDBACK_CONTROLLERS_CCM_CCM_H_\n\n#include <Eigen/Core>\n#include <mppi/feedback_controllers/feedback.cuh>\n\n#include <tuple>\n#include <cmath>\n\nnamespace ccm\n{\n// Convienient Types\ntemplate <int N = 1>\nusing Vectorf = Eigen::Matrix<float, N, 1>;\n\n// ================ Chebyshev Methods =================\n// Can also be written as pts, and weights being passed in.\ntemplate <int N = 1>\nstd::tuple<Vectorf<N>, Vectorf<N>> chebyshevPts()\n{\n  Vectorf<N> pts, weights;\n  int K = N - 1;\n  for (int i = K; i >= 0; i--)\n  {\n    pts[i] = (cosf(M_PI * i / K) + 1) / 2.0;\n  }\n  // Weights calculations. Weights look to be symmetric\n  int Kh = K * K - (K % 2 == 0) * 1;\n  weights[0] = 0.5 / Kh;\n  weights[K] = 0.5 / Kh;\n  int K_iteration = K / 2;\n  for (int k = 1; k <= K_iteration; k++)\n  {\n    float w_k = 0;\n    for (int j = 0; j <= K_iteration; j++)\n    {\n      int beta = 2;\n      if (j == 0 || j == K_iteration)\n      {\n        beta = 1;\n      }\n      w_k += beta * cosf((M_2_PI * j * k) / K) / (K * (1 - 4 * j * j));\n    }\n    weights[k] = w_k;\n    weights[K - k] = w_k;\n  }\n  return std::make_tuple(pts, weights);\n}\n\n// Create a polynomial matrix given a set of points and the max degree of the polynomials\ntemplate <int N = 1, int D = 2>\nEigen::Matrix<float, D, N> chebyshevPolynomial(const Eigen::Ref<const Vectorf<N>>& pts)\n{\n  Eigen::Matrix<float, D, N> T = Eigen::Matrix<float, D, N>::Zero();\n  T.row(0) = Vectorf<N>::Ones();\n  T.row(1) = pts;\n  for (int i = 2; i < D; i++)\n  {\n    for (int j = 0; j < N; j++)\n    {\n      T(i, j) = 2 * pts(j) * T(i - 1, j) - T(i - 2, j);\n    }\n  }\n  return T;\n}\n\n// Create the derivative of the polynomial matrix provided\ntemplate <int N = 1, int D = 2>\nEigen::Matrix<float, D, N> chebyshevPolynomialDerivative(const Eigen::Ref<const Eigen::Matrix<float, D, N>>& T,\n                    
                                     const Eigen::Ref<const Vectorf<N>>& pts)\n{\n  Eigen::Matrix<float, D, N> dT = Eigen::Matrix<float, D, N>::Zero();\n  dT.row(1) = Vectorf<N>::Ones();\n  for (int i = 2; i < D; i++)\n  {\n    for (int j = 0; j < N; j++)\n    {\n      dT(i, j) = 2 * T(i - 1, j) + 2 * pts(j) * dT(i - 1, j) - dT(i - 2, j);\n    }\n  }\n  return dT;\n}\n\n/**\n * Empty GPU Class for now\n **/\ntemplate <class DYN_T>\nclass LinearCCMGPU : public GPUFeedbackController<LinearCCMGPU<DYN_T>, DYN_T, GPUState>\n{\npublic:\n  // using DYN_T = typename GPUFeedbackController<LinearCCMGPU<DYN_T>, DYN_T, GPUState>::DYN_T;\n  LinearCCMGPU(cudaStream_t stream = 0) : GPUFeedbackController<LinearCCMGPU<DYN_T>, DYN_T, GPUState>(stream){};\n};\n\nstruct CCMParams\n{\n  int a = 0;\n};\n\ntemplate <class DYN_T, int NUM_TIMESTEPS = 100>\nclass LinearCCM : public FeedbackController<LinearCCMGPU<DYN_T>, CCMParams, NUM_TIMESTEPS>\n{\npublic:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n  // Typedefs\n  using state_array = typename DYN_T::state_array;\n  using control_array = typename DYN_T::control_array;\n  using B_matrix = typename DYN_T::dfdu;\n  using RiemannianMetric = typename DYN_T::dfdx;\n  typedef FeedbackController<LinearCCMGPU<DYN_T>, CCMParams, NUM_TIMESTEPS> PARENT_CLASS;\n  using TEMPLATED_FEEDBACK_STATE = typename PARENT_CLASS::TEMPLATED_FEEDBACK_STATE;\n\n  typedef Eigen::Matrix<float, DYN_T::STATE_DIM, NUM_TIMESTEPS> state_trajectory;\n  typedef Eigen::Matrix<float, DYN_T::CONTROL_DIM, NUM_TIMESTEPS> control_trajectory;\n\n  LinearCCM() = default;\n  ~LinearCCM() = default;\n\n  LinearCCM(DYN_T* dyn)\n  {\n    model_ = dyn;\n    M_ = RiemannianMetric::Identity();\n    std::cout << \"\\033[1;33mCALLED CONSTRUCTOR\\033[0m\" << std::endl;\n  }\n  /**\n   * Methods that have to be overwritten for FeedbackController\n   **/\n  void initTrackingController(){};\n  void computeFeedback(const Eigen::Ref<const state_array>& init_state,\n                       const 
Eigen::Ref<const state_trajectory>& goal_traj,\n                       const Eigen::Ref<const control_trajectory>& control_traj)\n  {\n    // setNominalControlTrajectory(control_traj);\n    // setNominalStateTrajectory(goal_traj);\n  }\n\n  control_array k_(const Eigen::Ref<const state_array>& x_act, const Eigen::Ref<const state_array>& x_goal, int t,\n                   TEMPLATED_FEEDBACK_STATE& fb_state)\n  {\n    control_array u_nom_t = u_nominal_traj_.col(t);\n\n    return u_feedback(x_act, x_goal, u_nom_t);\n  }\n\n  // Generic Method for calculating the Metric based on state x\n  RiemannianMetric M(const Eigen::Ref<const state_array>& x)\n  {\n    RiemannianMetric W;\n    float x0_2 = 0;  // powf(x[0], 2);\n    float x1_2 = 0;  // powf(x[1], 2);\n    /** lambda = 0.8 **/\n    // float w_1_term = 0.0004616 * x0_2 + 0.0004616 * x1_2 + 2.0459139;\n    // float w_2_term = -0.0007369 * x0_2 - 0.007369 * x1_2 - 1.9129315;\n    // float w_3_term = -0.0007369 * x0_2 - 0.007369 * x1_2 - 1.9129315;\n    // float w_4_term = 0.0026403 * x0_2 + 0.0026403 * x1_2 + 5.2164191;\n\n    /** lmabda = 1.5 **/\n    // float w_1_term = 0.006889 * x0_2 + 0.006889 * x1_2 + 3.6390396;\n    // float w_2_term = -0.034485 * x0_2 - 0.034485 * x1_2 - 7.1123521;\n    // float w_3_term = -0.034485 * x0_2 - 0.034485 * x1_2 - 7.1123521;\n    // float w_4_term = 0.2511511 * x0_2 + 0.2511511 * x1_2 + 24.9433551;\n\n    /** lambda = 3.5 **/\n    float w_1_term = 0.0005948 * x0_2 + 0.0005948 * x1_2 + 2.2416827;\n    float w_2_term = -0.0044842 * x0_2 - 0.0044842 * x1_2 - 8.2434395;\n    float w_3_term = -0.0044842 * x0_2 - 0.0044842 * x1_2 - 8.2434395;\n    float w_4_term = 0.0521421 * x0_2 + 0.0521421 * x1_2 + 59.1072868;\n    W << w_1_term, 0, w_2_term, 0, 0, w_1_term, 0, w_2_term, w_3_term, 0, w_4_term, 0, 0, w_3_term, 0, w_4_term;\n    return W.inverse();\n    // return M_;\n  }\n\n  float Energy(const Eigen::Ref<const state_array>& delta_x, const Eigen::Ref<const state_array>& x)\n  {\n    
return delta_x.transpose() * M(x) * delta_x;\n  }\n\n  // TODO Replace with some call to Dynamics\n  B_matrix B(const Eigen::Ref<const state_array>& x)\n  {\n    return model_->B(x);\n  }\n\n  state_array f(const Eigen::Ref<const state_array>& x,\n                const Eigen::Ref<const control_array>& u = control_array::Zero())\n  {\n    state_array x_der;\n    model_->computeStateDeriv(x, u, x_der);\n    return x_der;\n  }\n\n  control_array u_feedback(const Eigen::Ref<const state_array>& x_act, int t)\n  {\n    state_array x_nom_t = x_nominal_traj_.col(t);\n    control_array u_nom_t = u_nominal_traj_.col(t);\n    state_array delta_x = x_act - x_nom_t;\n\n    return u_feedback(x_act, x_nom_t, u_nom_t);\n  }\n\n  control_array u_feedback(const Eigen::Ref<const state_array>& x_act, const Eigen::Ref<const state_array>& x_nom,\n                           const Eigen::Ref<const control_array>& u_nom, bool debug = false)\n  {\n    state_array delta_x = x_act - x_nom;\n\n    float E = Energy(delta_x, x_act);\n    control_array lhs = 2 * B(x_act).transpose() * M(x_act) * delta_x;\n    float normalize_lhs = lhs.norm() * lhs.norm();\n    float rhs =\n        -2 * lambda_ * E - 2 * delta_x.transpose() * M(x_act) * (f(x_act) - f(x_nom) + (B(x_act) - B(x_nom)) * u_nom);\n    if (debug)\n    {\n      std::cout << \"Delta x: \" << delta_x.transpose() << std::endl;\n      std::cout << \"E: \" << E << \", RHS: \" << rhs << \", LHS Norm: \" << normalize_lhs << std::endl;\n    }\n\n    if (rhs > 0 || normalize_lhs == 0)\n    {\n      return control_array::Zero();\n    }\n    else\n    {\n      return rhs / normalize_lhs * lhs;\n    }\n  }\n\n  void setNominalControlTrajectory(const Eigen::Ref<const control_trajectory>&& u_traj)\n  {\n    u_nominal_traj_ = u_traj;\n  }\n\n  void setNominalStateTrajectory(const Eigen::Ref<const state_trajectory>& x_traj)\n  {\n    x_nominal_traj_ = x_traj;\n  }\n\n  void setM(const Eigen::Ref<const RiemannianMetric>& M_new)\n  {\n    M_ = M_new;\n  
}\n\nprotected:\n  state_trajectory x_nominal_traj_ = state_trajectory::Zero();\n  control_trajectory u_nominal_traj_ = control_trajectory::Zero();\n  float lambda_ = 1.0;\n\n  RiemannianMetric M_ = RiemannianMetric::Identity();\n\n  DYN_T* model_;\n  B_matrix B_ = B_matrix::Zero();\n};\n\n}  // namespace ccm\n\n#endif  // FEEDBACK_CONTROLLERS_CCM_CCM_H_\n"
  },
  {
    "path": "include/mppi/feedback_controllers/DDP/ddp.cu",
    "content": "#include <mppi/feedback_controllers/DDP/ddp.cuh>\n\ntemplate <class GPU_FB_T, class DYN_T, int NUM_TIMESTEPS>\nDeviceDDPImpl<GPU_FB_T, DYN_T, NUM_TIMESTEPS>::DeviceDDPImpl(int num_timesteps, cudaStream_t stream)\n  : num_timesteps_(num_timesteps)\n  , GPUFeedbackController<GPU_FB_T, DYN_T, DDPFeedbackState<DYN_T, NUM_TIMESTEPS>>(stream)\n{\n}\n\ntemplate <class GPU_FB_T, class DYN_T, int NUM_TIMESTEPS>\n__device__ void DeviceDDPImpl<GPU_FB_T, DYN_T, NUM_TIMESTEPS>::k(const float* __restrict__ x_act,\n                                                                 const float* __restrict__ x_goal, const int t,\n                                                                 float* __restrict__ theta,\n                                                                 float* __restrict__ control_output)\n{\n  float* fb_gain_t = &(this->state_.fb_gain_traj_[DYN_T::STATE_DIM * DYN_T::CONTROL_DIM * t]);\n  float e = 0;\n  for (int i = 0; i < DYN_T::STATE_DIM; i++)\n  {\n    e = x_act[i] - x_goal[i];\n    if (DYN_T::CONTROL_DIM % 4 == 0)\n    {  // load 4 floats in at a time to save on global memory reads\n      float4* fb_gain_t4 = reinterpret_cast<float4*>(&fb_gain_t[i * DYN_T::CONTROL_DIM]);\n      for (int j = 0; j < DYN_T::CONTROL_DIM / 4; j++)\n      {\n        reinterpret_cast<float4*>(control_output)[j] = fb_gain_t4[j] * e;\n      }\n    }\n    else if (DYN_T::CONTROL_DIM % 2 == 0)\n    {  // load 2 floats in at a time to save on global memory reads\n      float2* fb_gain_t2 = reinterpret_cast<float2*>(&fb_gain_t[i * DYN_T::CONTROL_DIM]);\n      for (int j = 0; j < DYN_T::CONTROL_DIM / 2; j++)\n      {\n        reinterpret_cast<float2*>(control_output)[j] = fb_gain_t2[j] * e;\n      }\n    }\n    else\n    {\n      for (int j = 0; j < DYN_T::CONTROL_DIM; j++)\n      {\n        control_output[j] += fb_gain_t[i * DYN_T::CONTROL_DIM + j] * e;\n      }\n    }\n  }\n}\n\n/**\n * CPU Class for DDP Methods\n */\ntemplate <class DYN_T, int 
NUM_TIMESTEPS>\nDDPFeedback<DYN_T, NUM_TIMESTEPS>::DDPFeedback(DYN_T* model, float dt, int num_timesteps, cudaStream_t stream)\n{\n  model_ = model;\n  this->dt_ = dt;\n  this->num_timesteps_ = std::max(num_timesteps, NUM_TIMESTEPS);\n  this->gpu_controller_->freeCudaMem();  // Remove allocated CUDA mem from default constructor\n  this->gpu_controller_ = std::make_shared<DeviceDDP<DYN_T, NUM_TIMESTEPS>>(this->num_timesteps_, stream);\n}\n\ntemplate <class DYN_T, int NUM_TIMESTEPS>\nvoid DDPFeedback<DYN_T, NUM_TIMESTEPS>::initTrackingController()\n{\n  util::DefaultLogger logger;\n  bool verbose = false;\n  ddp_model_ = std::make_shared<ModelWrapperDDP<DYN_T>>(model_);\n  ddp_solver_ = std::make_shared<DDP<ModelWrapperDDP<DYN_T>>>(this->dt_, this->num_timesteps_,\n                                                              this->params_.num_iterations, &logger, verbose);\n\n  result_ = OptimizerResult<ModelWrapperDDP<DYN_T>>();\n  result_.feedback_gain = feedback_gain_trajectory(this->num_timesteps_);\n  for (int i = 0; i < this->num_timesteps_; i++)\n  {\n    result_.feedback_gain[i] = DYN_T::feedback_matrix::Zero();\n  }\n\n  run_cost_ =\n      std::make_shared<TrackingCostDDP<ModelWrapperDDP<DYN_T>>>(this->params_.Q, this->params_.R, this->num_timesteps_);\n  terminal_cost_ = std::make_shared<TrackingTerminalCost<ModelWrapperDDP<DYN_T>>>(this->params_.Q_f);\n}\n\ntemplate <class DYN_T, int NUM_TIMESTEPS>\nvoid DDPFeedback<DYN_T, NUM_TIMESTEPS>::setParams(const DDPParams<DYN_T>& params)\n{\n  this->params_ = params;\n  run_cost_ =\n      std::make_shared<TrackingCostDDP<ModelWrapperDDP<DYN_T>>>(this->params_.Q, this->params_.R, this->num_timesteps_);\n  terminal_cost_ = std::make_shared<TrackingTerminalCost<ModelWrapperDDP<DYN_T>>>(this->params_.Q_f);\n}\n\ntemplate <class DYN_T, int NUM_TIMESTEPS>\nvoid DDPFeedback<DYN_T, NUM_TIMESTEPS>::computeFeedback(const Eigen::Ref<const state_array>& init_state,\n                                                        
const Eigen::Ref<const state_trajectory>& goal_traj,\n                                                        const Eigen::Ref<const control_trajectory>& control_traj)\n{\n  run_cost_->setTargets(goal_traj.data(), control_traj.data(), this->num_timesteps_);\n\n  terminal_cost_->xf = run_cost_->traj_target_x_.col(this->num_timesteps_ - 1);\n\n  // update control ranges\n  for (int i = 0; i < DYN_T::CONTROL_DIM; i++)\n  {\n    control_min_(i) = model_->control_rngs_[i].x;\n    control_max_(i) = model_->control_rngs_[i].y;\n  }\n\n  result_ =\n      ddp_solver_->run(init_state, control_traj, *ddp_model_, *run_cost_, *terminal_cost_, control_min_, control_max_);\n\n  // Copy Feedback Gains into Feedback State\n  for (size_t i = 0; i < result_.feedback_gain.size(); i++)\n  {\n    int i_index = i * DYN_T::STATE_DIM * DYN_T::CONTROL_DIM;\n    for (size_t j = 0; j < DYN_T::CONTROL_DIM * DYN_T::STATE_DIM; j++)\n    {\n      this->getFeedbackStatePointer()->fb_gain_traj_[i_index + j] = result_.feedback_gain[i].data()[j];\n    }\n  }\n  // Actually put new feedback gain trajectory onto the GPU\n  // this->gpu_controller_->copyToDevice();\n}\n"
  },
  {
    "path": "include/mppi/feedback_controllers/DDP/ddp.cuh",
    "content": "/*\n * Created on Sun Sep 28 2020 by Bogdan\n */\n\n#ifndef FEEDBACK_CONTROLLERS_DDP_CUH_\n#define FEEDBACK_CONTROLLERS_DDP_CUH_\n\n#include <mppi/feedback_controllers/feedback.cuh>\n#include <mppi/ddp/ddp_model_wrapper.h>\n#include <mppi/ddp/ddp_tracking_costs.h>\n#include <mppi/ddp/ddp.h>\n#include <mppi/ddp/util.h>\n#include <mppi/utils/math_utils.h>\n\ntemplate <class DYN_T>\nstruct DDPParams\n{\n  using StateCostWeight = typename TrackingCostDDP<ModelWrapperDDP<DYN_T>>::StateCostWeight;\n  using Hessian = typename TrackingTerminalCost<ModelWrapperDDP<DYN_T>>::Hessian;\n  using ControlCostWeight = typename TrackingCostDDP<ModelWrapperDDP<DYN_T>>::ControlCostWeight;\n\n  StateCostWeight Q = StateCostWeight::Identity();\n  Hessian Q_f = Hessian::Identity();\n  ControlCostWeight R = ControlCostWeight::Identity();\n  int num_iterations = 1;\n};\n\ntemplate <class DYN_T, int N_TIMESTEPS>\nstruct DDPFeedbackState : GPUState\n{\n  static const int FEEDBACK_SIZE = DYN_T::CONTROL_DIM * DYN_T::STATE_DIM * N_TIMESTEPS;\n  static const int NUM_TIMESTEPS = N_TIMESTEPS;\n\n  /**\n   * Variables\n   **/\n  float fb_gain_traj_[FEEDBACK_SIZE] MPPI_ALIGN(16) = { 0.0 }; // ensure it is aligned to 16 bytes\n\n  /**\n   * Methods\n   **/\n  bool isEqual(const DDPFeedbackState<DYN_T, N_TIMESTEPS>& other) const\n  {\n    for (int i = 0; i < FEEDBACK_SIZE; i++)\n    {\n      if (this->fb_gain_traj_[i] != other.fb_gain_traj_[i])\n      {\n        return false;\n      }\n    }\n    return true;\n  }\n};\n\n/**\n * Needed for Test in base_plant_tester.cu\n **/\ntemplate <class DYN_T, int N_TIMESTEPS>\nbool operator==(const DDPFeedbackState<DYN_T, N_TIMESTEPS>& lhs, const DDPFeedbackState<DYN_T, N_TIMESTEPS>& rhs)\n{\n  return lhs.isEqual(rhs);\n};\n\n/**\n * DDP GPU Controller class starting point. 
This class is where the actual\n * methods for DDP on the GPU are implemented but it is not used directly since\n * setting up the GPU_FB_T value would be painful\n */\ntemplate <class GPU_FB_T, class DYN_T, int NUM_TIMESTEPS = 1>\nclass DeviceDDPImpl : public GPUFeedbackController<GPU_FB_T, DYN_T, DDPFeedbackState<DYN_T, NUM_TIMESTEPS>>\n{\npublic:\n  using PARAMS_T = DDPFeedbackState<DYN_T, NUM_TIMESTEPS>;\n  // static const int SHARED_MEM_REQUEST_BLK_BYTES = DYN_T::CONTROL_DIM * DYN_T::STATE_DIM;\n  DeviceDDPImpl(int num_timesteps, cudaStream_t stream = 0);\n  DeviceDDPImpl(cudaStream_t stream = 0)\n    : GPUFeedbackController<GPU_FB_T, DYN_T, DDPFeedbackState<DYN_T, NUM_TIMESTEPS>>(stream){};\n\n  void allocateCUDAMemory(){};\n  void deallocateCUDAMemory(){};\n\n  __device__ void k(const float* __restrict__ x_act, const float* __restrict__  x_goal, const int t, float* __restrict__  theta, float* __restrict__  control_output);\n\nprotected:\n  // Needed for allocating memory for feedback gains\n  int num_timesteps_ = 1;\n};\n\n/**\n * Alias class for DDP GPU Controller. This sets up the class derivation correctly and is\n * used inside of the CPU version of DDP\n */\ntemplate <class DYN_T, int NUM_TIMESTEPS>\nclass DeviceDDP : public DeviceDDPImpl<DeviceDDP<DYN_T, NUM_TIMESTEPS>, DYN_T, NUM_TIMESTEPS>\n{\npublic:\n  DeviceDDP(int num_timesteps, cudaStream_t stream = 0)\n    : DeviceDDPImpl<DeviceDDP<DYN_T, NUM_TIMESTEPS>, DYN_T, NUM_TIMESTEPS>(num_timesteps, stream){};\n\n  DeviceDDP(cudaStream_t stream = 0) : DeviceDDPImpl<DeviceDDP<DYN_T, NUM_TIMESTEPS>, DYN_T, NUM_TIMESTEPS>(stream){};\n};\n\n/**\n * CPU Class for DDP. 
This is what the user should interact with\n */\ntemplate <class DYN_T, int NUM_TIMESTEPS>\nclass DDPFeedback : public FeedbackController<DeviceDDP<DYN_T, NUM_TIMESTEPS>, DDPParams<DYN_T>, NUM_TIMESTEPS>\n{\npublic:\n  /**\n   * Aliases\n   **/\n  typedef util::EigenAlignedVector<float, DYN_T::CONTROL_DIM, DYN_T::STATE_DIM> feedback_gain_trajectory;\n  typedef FeedbackController<DeviceDDP<DYN_T, NUM_TIMESTEPS>, DDPParams<DYN_T>, NUM_TIMESTEPS> PARENT_CLASS;\n\n  using control_array = typename PARENT_CLASS::control_array;\n  using state_array = typename PARENT_CLASS::state_array;\n  using state_trajectory = typename PARENT_CLASS::state_trajectory;\n  using control_trajectory = typename PARENT_CLASS::control_trajectory;\n  using INTERNAL_STATE_T = typename PARENT_CLASS::TEMPLATED_FEEDBACK_STATE;\n  using feedback_gain_matrix = typename DYN_T::feedback_matrix;\n  using square_state_matrix = typename DDPParams<DYN_T>::StateCostWeight;\n  using square_control_matrix = typename DDPParams<DYN_T>::ControlCostWeight;\n\n  /**\n   * Variables\n   **/\n  std::shared_ptr<ModelWrapperDDP<DYN_T>> ddp_model_;\n  std::shared_ptr<TrackingCostDDP<ModelWrapperDDP<DYN_T>>> run_cost_;\n  std::shared_ptr<TrackingTerminalCost<ModelWrapperDDP<DYN_T>>> terminal_cost_;\n  std::shared_ptr<DDP<ModelWrapperDDP<DYN_T>>> ddp_solver_;\n  OptimizerResult<ModelWrapperDDP<DYN_T>> result_;\n\n  control_array control_min_;\n  control_array control_max_;\n  DYN_T* model_;\n\n  DDPFeedback(DYN_T* model, float dt, int num_timesteps = NUM_TIMESTEPS, cudaStream_t stream = 0);\n\n  void setParams(const DDPParams<DYN_T>& params) override;\n\n  void initTrackingController();\n\n  control_array k_(const Eigen::Ref<const state_array>& x_act, const Eigen::Ref<const state_array>& x_goal, int t,\n                   INTERNAL_STATE_T& fb_state)\n  {\n    int index = DYN_T::STATE_DIM * DYN_T::CONTROL_DIM * t;\n    Eigen::Map<feedback_gain_matrix> fb_gain(&(fb_state.fb_gain_traj_[index]));\n    control_array u_output 
= fb_gain * (x_act - x_goal);\n    return u_output;\n  }\n\n  feedback_gain_trajectory getFeedbackGainsEigen()\n  {\n    return result_.feedback_gain;\n  }\n\n  void computeFeedback(const Eigen::Ref<const state_array>& init_state,\n                       const Eigen::Ref<const state_trajectory>& goal_traj,\n                       const Eigen::Ref<const control_trajectory>& control_traj);\n};\n\n#ifdef __CUDACC__\n#include \"ddp.cu\"\n#endif\n\n#endif  // FEEDBACK_CONTROLLERS_DDP_CUH_\n"
  },
  {
    "path": "include/mppi/feedback_controllers/feedback.cu",
    "content": "#include <mppi/feedback_controllers/feedback.cuh>\n\n// ===================== GPUFeedbackController ========================\ntemplate <class CLASS_T, class DYN_T, class FEEDBACK_STATE_T>\nvoid GPUFeedbackController<CLASS_T, DYN_T, FEEDBACK_STATE_T>::copyToDevice(bool synchronize)\n{\n  if (GPUMemStatus_)\n  {\n    HANDLE_ERROR(\n        cudaMemcpyAsync(&feedback_d_->state_, &state_, sizeof(FEEDBACK_STATE_T), cudaMemcpyHostToDevice, stream_));\n    if (synchronize)\n    {\n      HANDLE_ERROR(cudaStreamSynchronize(stream_));\n    }\n  }\n}\n\ntemplate <class CLASS_T, class DYN_T, class FEEDBACK_STATE_T>\nvoid GPUFeedbackController<CLASS_T, DYN_T, FEEDBACK_STATE_T>::freeCudaMem()\n{\n  if (GPUMemStatus_)\n  {\n    CLASS_T* derived = static_cast<CLASS_T*>(this);\n    derived->deallocateCUDAMemory();\n    cudaFree(feedback_d_);\n    GPUMemStatus_ = false;\n    feedback_d_ = nullptr;\n  }\n}\n\ntemplate <class CLASS_T, class DYN_T, class FEEDBACK_STATE_T>\nvoid GPUFeedbackController<CLASS_T, DYN_T, FEEDBACK_STATE_T>::GPUSetup()\n{\n  CLASS_T* derived = static_cast<CLASS_T*>(this);\n  if (!GPUMemStatus_)\n  {\n    feedback_d_ = Managed::GPUSetup(derived);\n    derived->allocateCUDAMemory();\n  }\n  else\n  {\n    this->logger_->debug(\"Feedback Controller GPU Memory already set\\n\");\n  }\n  derived->copyToDevice();\n}\n\n// ===================== FeedbackController ========================\n"
  },
  {
    "path": "include/mppi/feedback_controllers/feedback.cuh",
    "content": "/*\n * Created on Sun Sep 6 2020 by Bogdan\n */\n\n#ifndef FEEDBACK_BASE_CUH_\n#define FEEDBACK_BASE_CUH_\n\n#include <Eigen/Core>\n#include <mppi/utils/managed.cuh>\n\n#include <cmath>\n#include <memory>\n\nstruct GPUState\n{\n};\n\n/**\n * This is the begining of the mess. So we have a GPU Feedback class that houses the methods and data\n * that are used on the GPU. The first template argument is for that cool thing that Jason knows the name of.\n * The second template argument is the dynamics class so that we can automatically pull out the state and control\n * dims needed. The final template argument is the state class. This state class is what will be passed back and forth\n * from the CPU to the GPU and vice versa (if necessary). For example, this would be your k_p, k_d, and k_i terms for a\n * PID, or the trajectory of feedback gains for DDP.\n *\n * Things a new controller will need:\n *   - a k method. This is how to use the feedback controller on the GPU\n *   - a GPU_STATE_T class. 
This should contain the relevant data to transfer from CPU to GPU and vice versa\n * Optional Things to implement:\n *   - copyFromDevice(), copyToDevice(), paramsToDevice(): if your data structure FEEDBACK_STATE_T is complex, you will\n * need to implement these yourself\n *   - allocateCUDAMemory(), deallocateCUDAMemory(): If your feedback class needs some CUDA memory beyond the\n * FEEDBACK_STATE_T, you will need to create and clear it here.\n */\ntemplate <class GPU_FB_T, class TEMPLATED_DYNAMICS, class GPU_STATE_T>\nclass GPUFeedbackController : public Managed\n{\npublic:\n  /**\n   * Type Aliasing\n   */\n  using DYN_T = TEMPLATED_DYNAMICS;\n  using FEEDBACK_STATE_T = GPU_STATE_T;\n\n  GPU_FB_T* feedback_d_ = nullptr;\n\n  /**\n   * Constructors\n   */\n\n  GPUFeedbackController() = default;\n\n  GPUFeedbackController(cudaStream_t stream = 0) : Managed(stream)\n  {\n    this->SHARED_MEM_REQUEST_GRD_BYTES = 0;\n    this->SHARED_MEM_REQUEST_BLK_BYTES = 0;\n  }\n\n  /**\n   * =================== METHODS THAT SHOULD NOT BE OVERWRITTEN ================\n   */\n  virtual ~GPUFeedbackController()\n  {\n    freeCudaMem();\n  };\n\n  // Overwrite of Managed->GPUSetup to call allocateCUDAMemory as well\n  void GPUSetup();\n  void freeCudaMem();\n\n  void setFeedbackState(const FEEDBACK_STATE_T& state)\n  {\n    state_ = state;\n    if (GPUMemStatus_)\n    {\n      GPU_FB_T& derived = static_cast<GPU_FB_T&>(*this);\n      derived.copyToDevice();\n    }\n  }\n\n  __host__ __device__ FEEDBACK_STATE_T getFeedbackState()\n  {\n    return state_;\n  }\n\n  __host__ __device__ FEEDBACK_STATE_T* getFeedbackStatePointer()\n  {\n    return &state_;\n  }\n\n  /**\n   * ==================== NECESSARY METHODS TO OVERWRITE =====================\n   */\n  __device__ void k(const float* __restrict__ x_act, const float* __restrict__  x_goal, const int t, float* __restrict__  theta, float* __restrict__  control_output)\n  {\n  }\n  /**\n   * ===================== OPTIONAL METHODS TO 
OVERWRITE ======================\n   */\n  /**\n   * Only needed to allocate/deallocate additional CUDA memory when appropriate,\n   * GPU pointer is already handled.\n   */\n  void allocateCUDAMemory()\n  {\n  }\n  void deallocateCUDAMemory()\n  {\n  }\n\n  __device__ void initializeFeedback(const float* __restrict__ x, const float* __restrict__ u, float* __restrict__ theta, const float t, const float dt)\n  {}\n\n  // Abstract method to copy information to GPU\n  // void copyToDevice() {}\n\n  // Copies the params to the device at the moment\n  void copyToDevice(bool synchronize = true);\n  // Method to return potential diagnostic information from GPU\n  void copyFromDevice(bool synchronize = true)\n  {\n  }\n\nprotected:\n  FEEDBACK_STATE_T state_;\n};\n\n/**\n * Steps to making a new one\n * Create the GPUFeedback class as an impl class like costs but is still templated on DYN\n * The actual GPUFeedback_act class will then be templated on DYN and inherit from the GPUFeedbackImpl\n * Write the feedback controller to use the GPUFeedback_act as thee GPU_FEEDBACK_T template option\n * It will then automatically create the right pointer\n */\ntemplate <class GPU_FB_T, class PARAMS_T, int NUM_TIMESTEPS>\nclass FeedbackController\n{\npublic:\n  // Type Defintions and aliases\n  typedef typename GPU_FB_T::DYN_T DYN_T;\n  // typedef FEEDBACK_STATE_T TEMPLATED_FEEDBACK_STATE;\n  typedef PARAMS_T TEMPLATED_PARAMS;\n  typedef GPU_FB_T TEMPLATED_GPU_FEEDBACK;\n  typedef typename GPU_FB_T::FEEDBACK_STATE_T TEMPLATED_FEEDBACK_STATE;\n  static const int FB_TIMESTEPS = NUM_TIMESTEPS;\n\n  using state_array = typename DYN_T::state_array;\n  using control_array = typename DYN_T::control_array;\n  typedef Eigen::Matrix<float, DYN_T::CONTROL_DIM,\n                        NUM_TIMESTEPS> control_trajectory;  // A control trajectory\n  typedef Eigen::Matrix<float, DYN_T::STATE_DIM,\n                        NUM_TIMESTEPS> state_trajectory;  // A state trajectory\n\n  // Constructors 
and Generators\n  FeedbackController(float dt = 0.01, int num_timesteps = NUM_TIMESTEPS, cudaStream_t stream = 0)\n    : dt_(dt), num_timesteps_(num_timesteps)\n  {\n    gpu_controller_ = std::make_shared<GPU_FB_T>(stream);\n    auto logger = std::make_shared<mppi::util::MPPILogger>();\n    setLogger(logger);\n  }\n\n  virtual ~FeedbackController()\n  {\n    freeCudaMem();\n  };\n\n  virtual __host__ void GPUSetup()\n  {\n    gpu_controller_->GPUSetup();\n  }\n\n  virtual __host__ void freeCudaMem()\n  {\n    gpu_controller_->freeCudaMem();\n  }\n\n  virtual __host__ void initTrackingController() = 0;\n\n  virtual __host__ void setParams(const PARAMS_T& params)\n  {\n    params_ = params;\n  }\n\n  PARAMS_T getParams()\n  {\n    return params_;\n  }\n\n  // CPU Methods\n  /**\n   * Compute feedback control method that should not be overwritten.\n   * Input:\n   *  - x_act: the state where the system is\n   *  - x_goal: the state we want to be at\n   *  - index: the number of timesteps from the initial time we are\n   */\n  virtual __host__ control_array k(const Eigen::Ref<const state_array>& x_act, const Eigen::Ref<const state_array>& x_goal,\n                          int t)\n  {\n    TEMPLATED_FEEDBACK_STATE* gpu_feedback_state = getFeedbackStatePointer();\n    return k_(x_act, x_goal, t, *gpu_feedback_state);\n  }\n  /**\n   * Feeback Control Method to overwrite.\n   */\n  virtual __host__ control_array k_(const Eigen::Ref<const state_array>& x_act, const Eigen::Ref<const state_array>& x_goal,\n                           int t, TEMPLATED_FEEDBACK_STATE& fb_state) = 0;\n\n  // might not be a needed method\n  virtual __host__ void computeFeedback(const Eigen::Ref<const state_array>& init_state,\n                               const Eigen::Ref<const state_trajectory>& goal_traj,\n                               const Eigen::Ref<const control_trajectory>& control_traj) = 0;\n\n  // TODO Construct a default version of this method that uses the state_ variable 
automatically\n  virtual __host__ control_array interpolateFeedback_(const Eigen::Ref<const state_array>& state,\n                                             const Eigen::Ref<const state_array>& goal_state, double rel_time,\n                                             TEMPLATED_FEEDBACK_STATE& fb_state)\n  {\n    int lower_idx = (int)(rel_time / dt_);\n    int upper_idx = lower_idx + 1;\n    double alpha = (rel_time - lower_idx * dt_) / dt_;\n\n    control_array u_fb =\n        (1 - alpha) * k_(state, goal_state, lower_idx, fb_state) + alpha * k_(state, goal_state, upper_idx, fb_state);\n\n    return u_fb;\n  }\n\n  virtual __host__ control_array interpolateFeedback(const Eigen::Ref<const state_array>& state,\n                                            const Eigen::Ref<const state_array>& goal_state, double rel_time)\n  {\n    TEMPLATED_FEEDBACK_STATE* fb_state = getFeedbackStatePointer();\n    return interpolateFeedback_(state, goal_state, rel_time, *fb_state);\n  }\n\n  GPU_FB_T* getDevicePointer()\n  {\n    return gpu_controller_->feedback_d_;\n  }\n\n  std::shared_ptr<GPU_FB_T> getHostPointer()\n  {\n    return gpu_controller_;\n  }\n\n  void bindToStream(cudaStream_t stream)\n  {\n    gpu_controller_->bindToStream(stream);\n  }\n\n  /**\n   * Calls GPU version\n   */\n  void copyToDevice(bool synchronize = true)\n  {\n    this->gpu_controller_->copyToDevice(synchronize);\n  }\n\n  TEMPLATED_FEEDBACK_STATE getFeedbackState()\n  {\n    return this->gpu_controller_->getFeedbackState();\n  }\n\n  TEMPLATED_FEEDBACK_STATE* getFeedbackStatePointer()\n  {\n    return this->gpu_controller_->getFeedbackStatePointer();\n  }\n\n  void setFeedbackState(const TEMPLATED_FEEDBACK_STATE& gpu_fb_state)\n  {\n    this->gpu_controller_->setFeedbackState(gpu_fb_state);\n  }\n\n  float getDt()\n  {\n    return dt_;\n  }\n  void setDt(float dt)\n  {\n    dt_ = dt;\n  }\n\n  __host__ void setLogger(const mppi::util::MPPILoggerPtr& logger)\n  {\n    logger_ = logger;\n    
gpu_controller_->setLogger(logger);\n  }\n\n  __host__ void setLogLevel(const mppi::util::LOG_LEVEL& level)\n  {\n    logger_->setLogLevel(level);\n    gpu_controller_->setLogLevel(level);\n  }\n\n  __host__ mppi::util::MPPILoggerPtr getLogger()\n  {\n    return logger_;\n  }\n\n  __host__ mppi::util::MPPILoggerPtr getLogger() const\n  {\n    return logger_;\n  }\n\nprotected:\n  std::shared_ptr<GPU_FB_T> gpu_controller_;\n  float dt_;\n  int num_timesteps_;\n  PARAMS_T params_;\n  mppi::util::MPPILoggerPtr logger_ = nullptr;\n};\n\n#ifdef __CUDACC__\n#include \"feedback.cu\"\n#endif\n\n#endif  // FEEDBACK_BASE_CUH_\n"
  },
  {
    "path": "include/mppi/instantiations/autorally_mppi/autorally_mppi.cuh",
    "content": "#ifndef MPPIGENERIC_CONTROLLERERS_AUTORALLY_MPPI_CUH\n#define MPPIGENERIC_CONTROLLERERS_AUTORALLY_MPPI_CUH\n\n#include <mppi/controllers/MPPI/mppi_controller.cuh>\n#include <mppi/cost_functions/autorally/ar_standard_cost.cuh>\n#include <mppi/dynamics/autorally/ar_nn_model.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n\n// #ifdef USE_NEURAL_NETWORK_MODEL__ /*Use neural network dynamics model*/\nconst int MPPI_NUM_ROLLOUTS__ = 1920;\nconst int BLOCKSIZE_X = 8;\nconst int BLOCKSIZE_Y = 16;\nconst int NUM_TIMESTEPS = 150;\ntypedef NeuralNetModel<7, 2, 3> DynamicsModel;\ntypedef ARStandardCost CostFunctionClass;\n\ntypedef DDPFeedback<DynamicsModel, NUM_TIMESTEPS> FEEDBACK_T;\ntypedef mppi::sampling_distributions::GaussianDistribution<DynamicsModel::DYN_PARAMS_T> Sampler;\n\n// #elif USE_BASIS_FUNC_MODEL__ /*Use the basis function model* */\n// const int MPPI_NUM_ROLLOUTS__ = 2560;\n// const int BLOCKSIZE_X = 16;\n// const int BLOCKSIZE_Y = 4;\n// typedef GeneralizedLinear<CarBasisFuncs, 7, 2, 25, CarKinematics, 3> DynamicsModel;\n// #endif\n#endif  // MPPIGENERIC_CONTROLLERERS_AUTORALLY_MPPI_CUH\n"
  },
  {
    "path": "include/mppi/instantiations/cartpole_mppi/cartpole_mppi.cuh",
    "content": "#ifndef MPPIGENERIC_CONTROLLERERS_CARTPOLE_MPPI_CUH\n#define MPPIGENERIC_CONTROLLERERS_CARTPOLE_MPPI_CUH\n\n#include <mppi/controllers/MPPI/mppi_controller.cuh>\n// #include <mppi/controllers/Tube-MPPI/tube_mppi_controller.cuh>\n#include <mppi/cost_functions/cartpole/cartpole_quadratic_cost.cuh>\n#include <mppi/dynamics/cartpole/cartpole_dynamics.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n\n#endif  // MPPIGENERIC_CONTROLLERERS_CARTPOLE_MPPI_CUH\n"
  },
  {
    "path": "include/mppi/instantiations/double_integrator_mppi/double_integrator_mppi.cuh",
    "content": "#ifndef MPPIGENERIC_CONTROLLERERS_DOUBLE_INTEGRATOR_CUH\n#define MPPIGENERIC_CONTROLLERERS_DOUBLE_INTEGRATOR_CUH\n\n#include <mppi/controllers/MPPI/mppi_controller.cuh>\n#include <mppi/controllers/Tube-MPPI/tube_mppi_controller.cuh>\n#include <mppi/controllers/R-MPPI/robust_mppi_controller.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_circle_cost.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_robust_cost.cuh>\n#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n\n#endif  // MPPIGENERIC_CONTROLLERERS_DOUBLE_INTEGRATOR_CUH\n"
  },
  {
    "path": "include/mppi/instantiations/quadrotor_mppi/quadrotor_mppi.cuh",
    "content": "#ifndef MPPI_GENERIC_CONTROLLERS_QUADROTOR_MPPI_CUH_\n#define MPPI_GENERIC_CONTROLLERS_QUADROTOR_MPPI_CUH_\n\n#include <mppi/controllers/MPPI/mppi_controller.cuh>\n// #include <mppi/controllers/Tube-MPPI/tube_mppi_controller.cuh>\n// #include <mppi/controllers/R-MPPI/robust_mppi_controller.cuh>\n#include <mppi/cost_functions/quadrotor/quadrotor_map_cost.cuh>\n#include <mppi/cost_functions/quadrotor/quadrotor_quadratic_cost.cuh>\n#include <mppi/dynamics/quadrotor/quadrotor_dynamics.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n\n#endif  // MPPI_GENERIC_CONTROLLERS_QUADROTOR_MPPI_CUH_\n"
  },
  {
    "path": "include/mppi/sampling_distributions/colored_noise/colored_noise.cu",
    "content": "/**\n * Created by Bogdan Vlahov on 3/25/2023\n **/\n\n#define COLORED_TEMPLATE template <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n#define COLORED_NOISE ColoredNoiseDistributionImpl<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>\n\n#include <mppi/sampling_distributions/colored_noise/colored_noise.cuh>\n#include <mppi/utils/cuda_math_utils.cuh>\n#include <mppi/utils/math_utils.h>\n\n__global__ void configureFrequencyNoise(cufftComplex* noise, float* variance, int num_samples, int control_dim,\n                                        int num_freq)\n{\n  int sample_index = blockDim.x * blockIdx.x + threadIdx.x;\n  int freq_index = blockDim.y * blockIdx.y + threadIdx.y;\n  int control_index = blockDim.z * blockIdx.z + threadIdx.z;\n\n  if (sample_index < num_samples && freq_index < num_freq && control_index < control_dim)\n  {\n    int noise_index = (sample_index * control_dim + control_index) * num_freq + freq_index;\n    int variance_index = control_index * num_freq + freq_index;\n    noise[noise_index].x *= variance[variance_index];\n    if (freq_index == 0)\n    {\n      noise[noise_index].y = 0;\n    }\n    else if (num_freq % 2 == 1 && freq_index == num_freq - 1)\n    {\n      noise[noise_index].y = 0;\n    }\n    else\n    {\n      noise[noise_index].y *= variance[variance_index];\n    }\n  }\n}\n\n__global__ void rearrangeNoise(float* input, float* output, float* variance, int num_trajectories, int num_timesteps,\n                               int control_dim, int offset_t, float decay_rate)\n{\n  const int sample_index = blockIdx.x * blockDim.x + threadIdx.x;\n  const int time_index = blockIdx.y * blockDim.y + threadIdx.y;\n  const int control_index = blockIdx.z * blockDim.z + threadIdx.z;\n  const float decayed_offset = decay_rate == 0 ? 
0 : powf(decay_rate, time_index);\n  if (sample_index < num_trajectories && time_index < (num_timesteps) && control_index < control_dim)\n  {  // cuFFT does not normalize inverse transforms so a division by the num_timesteps is required\n    output[(sample_index * num_timesteps + time_index) * control_dim + control_index] =\n        (input[(sample_index * control_dim + control_index) * 2 * num_timesteps + time_index] -\n         input[(sample_index * control_dim + control_index) * 2 * num_timesteps + offset_t] * decayed_offset) /\n        (variance[control_index] * 2 * num_timesteps);\n    // printf(\"ROLLOUT %d CONTROL %d TIME %d: in %f out: %f\\n\", sample_index, control_index, time_index,\n    //     input[(sample_index * control_dim + control_index) * num_timesteps + time_index],\n    //     output[(sample_index * num_timesteps + time_index) * control_dim + control_index]);\n  }\n}\n\nvoid powerlaw_psd_gaussian(std::vector<float>& exponents, int num_timesteps, int num_trajectories,\n                           float* control_noise_d, int offset_t, curandGenerator_t& gen, float offset_decay_rate,\n                           cudaStream_t stream, float fmin)\n{\n  const int BLOCKSIZE_X = 32;\n  const int BLOCKSIZE_Y = 32;\n  const int BLOCKSIZE_Z = 1;\n  int control_dim = exponents.size();\n\n  std::vector<float> sample_freq;\n  const int sample_num_timesteps = num_timesteps * 2;\n  fftfreq(sample_num_timesteps, sample_freq);\n  float cutoff_freq = fmaxf(fmin, 1.0f / sample_num_timesteps);\n  int freq_size = sample_freq.size();\n\n  int smaller_index = 0;\n  Eigen::MatrixXf sample_freqs(freq_size, control_dim);\n\n  // Adjust the weighting of each frequency by the exponents\n  for (int i = 0; i < freq_size; i++)\n  {\n    if (sample_freq[i] < cutoff_freq)\n    {\n      smaller_index++;\n    }\n    else if (smaller_index < freq_size)\n    {\n      for (int j = 0; j < smaller_index; j++)\n      {\n        sample_freq[j] = sample_freq[smaller_index];\n        for (int 
k = 0; k < control_dim; k++)\n        {\n          sample_freqs(j, k) = powf(sample_freq[smaller_index], -exponents[k] / 2.0f);\n        }\n      }\n    }\n    for (int j = 0; j < control_dim; j++)\n    {\n      sample_freqs(i, j) = powf(sample_freq[i], -exponents[j] / 2.0f);\n    }\n  }\n\n  // Calculate variance\n  float sigma[control_dim] = { 0 };\n  for (int i = 0; i < control_dim; i++)\n  {\n    for (int j = 1; j < freq_size - 1; j++)\n    {\n      sigma[i] += SQ(sample_freqs(j, i));\n    }\n    // std::for_each(sample_freq.begin() + 1, sample_freq.end() - 1, [&sigma, &i](float j) { sigma[i] += powf(j, 2); });\n    sigma[i] += SQ(sample_freqs(freq_size - 1, i) * ((1.0f + (sample_num_timesteps % 2)) / 2.0f));\n    sigma[i] = 2.0f * sqrtf(sigma[i]) / sample_num_timesteps;\n  }\n\n  // Sample the noise in frequency domain and reutrn to time domain\n  cufftHandle plan;\n  const int batch = num_trajectories * control_dim;\n  // Need 2 * (sample_num_timesteps / 2 + 1) * batch of randomly sampled values\n  // float* samples_in_freq_d;\n  float* sigma_d;\n  float* noise_in_time_d;\n  cufftComplex* samples_in_freq_complex_d;\n  float* freq_coeffs_d;\n  // HANDLE_ERROR(cudaMallocAsync((void**)&samples_in_freq_d, sizeof(float) * 2 * batch * freq_size, stream));\n  // HANDLE_ERROR(cudaMallocAsync((void**)&samples_in_freq_d, sizeof(float) * 2 * batch * sample_num_timesteps,\n  // stream));\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n  HANDLE_ERROR(cudaMallocAsync((void**)&freq_coeffs_d, sizeof(float) * freq_size * control_dim, stream));\n  HANDLE_ERROR(cudaMallocAsync((void**)&samples_in_freq_complex_d, sizeof(cufftComplex) * batch * freq_size, stream));\n  HANDLE_ERROR(cudaMallocAsync((void**)&noise_in_time_d, sizeof(float) * batch * sample_num_timesteps, stream));\n  HANDLE_ERROR(cudaMallocAsync((void**)&sigma_d, sizeof(float) * control_dim, stream));\n#else\n  HANDLE_ERROR(cudaMalloc((void**)&freq_coeffs_d, sizeof(float) * freq_size * control_dim));\n  
HANDLE_ERROR(cudaMalloc((void**)&samples_in_freq_complex_d, sizeof(cufftComplex) * batch * freq_size));\n  HANDLE_ERROR(cudaMalloc((void**)&noise_in_time_d, sizeof(float) * batch * sample_num_timesteps));\n  HANDLE_ERROR(cudaMalloc((void**)&sigma_d, sizeof(float) * control_dim));\n#endif\n  // curandSetStream(gen, stream);\n  HANDLE_CURAND_ERROR(curandGenerateNormal(gen, (float*)samples_in_freq_complex_d, 2 * batch * freq_size, 0.0, 1.0));\n  HANDLE_ERROR(cudaMemcpyAsync(freq_coeffs_d, sample_freqs.data(), sizeof(float) * freq_size * control_dim,\n                               cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(sigma_d, sigma, sizeof(float) * control_dim, cudaMemcpyHostToDevice, stream));\n  const int variance_grid_x = (num_trajectories - 1) / BLOCKSIZE_X + 1;\n  const int variance_grid_y = (freq_size - 1) / BLOCKSIZE_Y + 1;\n  const int variance_grid_z = (control_dim - 1) / BLOCKSIZE_Z + 1;\n  dim3 grid(variance_grid_x, variance_grid_y, variance_grid_z);\n  dim3 block(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);\n  // configureFrequencyNoise<<<grid, block, 0, stream>>>((cuComplex*) samples_in_freq_d, freq_coeffs_d, freq_size,\n  // batch);\n  configureFrequencyNoise<<<grid, block, 0, stream>>>(samples_in_freq_complex_d, freq_coeffs_d, num_trajectories,\n                                                      control_dim, freq_size);\n  HANDLE_ERROR(cudaGetLastError());\n  HANDLE_CUFFT_ERROR(cufftPlan1d(&plan, sample_num_timesteps, CUFFT_C2R, batch));\n  HANDLE_CUFFT_ERROR(cufftSetStream(plan, stream));\n  // freq_data needs to be batch number of sample_num_timesteps/2 + 1 cuComplex values\n  // time_data needs to be batch * sample_num_timesteps floats\n  HANDLE_CUFFT_ERROR(cufftExecC2R(plan, samples_in_freq_complex_d, noise_in_time_d));\n  const int reorder_grid_x = (num_trajectories - 1) / BLOCKSIZE_X + 1;\n  const int reorder_grid_y = (num_timesteps - 1) / BLOCKSIZE_Y + 1;\n  const int reorder_grid_z = (control_dim - 1) / BLOCKSIZE_Z + 
1;\n  dim3 reorder_grid(reorder_grid_x, reorder_grid_y, reorder_grid_z);\n  dim3 reorder_block(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);\n  // std::cout << \"Grid: \" << reorder_grid.x << \", \" << reorder_grid.y << \", \" << reorder_grid.z << std::endl;\n  // std::cout << \"Block: \" << reorder_block.x << \", \" << reorder_block.y << \", \" << reorder_block.z << std::endl;\n  rearrangeNoise<<<reorder_grid, reorder_block, 0, stream>>>(noise_in_time_d, control_noise_d, sigma_d,\n                                                             num_trajectories, num_timesteps, control_dim, offset_t,\n                                                             offset_decay_rate);\n  HANDLE_ERROR(cudaGetLastError());\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n  HANDLE_CUFFT_ERROR(cufftDestroy(plan));\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n  // HANDLE_ERROR(cudaFreeAsync(samples_in_freq_d, stream));\n  HANDLE_ERROR(cudaFreeAsync(freq_coeffs_d, stream));\n  HANDLE_ERROR(cudaFreeAsync(sigma_d, stream));\n  HANDLE_ERROR(cudaFreeAsync(samples_in_freq_complex_d, stream));\n  HANDLE_ERROR(cudaFreeAsync(noise_in_time_d, stream));\n#else\n  // HANDLE_ERROR(cudaFree(samples_in_freq_d));\n  HANDLE_ERROR(cudaFree(freq_coeffs_d));\n  HANDLE_ERROR(cudaFree(sigma_d));\n  HANDLE_ERROR(cudaFree(samples_in_freq_complex_d));\n  HANDLE_ERROR(cudaFree(noise_in_time_d));\n#endif\n}\n\nnamespace mppi\n{\nnamespace sampling_distributions\n{\nCOLORED_TEMPLATE\nCOLORED_NOISE::ColoredNoiseDistributionImpl(cudaStream_t stream) : PARENT_CLASS::GaussianDistributionImpl(stream)\n{\n}\n\nCOLORED_TEMPLATE\nCOLORED_NOISE::ColoredNoiseDistributionImpl(const SAMPLING_PARAMS_T& params, cudaStream_t stream)\n  : PARENT_CLASS::GaussianDistributionImpl(params, stream)\n{\n}\n\nCOLORED_TEMPLATE\n__host__ void COLORED_NOISE::freeCudaMem()\n{\n  if (this->GPUMemStatus_)\n  {\n    cudaFree(freq_coeffs_d_);\n    cudaFree(samples_in_freq_complex_d_);\n    cudaFree(noise_in_time_d_);\n    
cudaFree(frequency_sigma_d_);\n    freq_coeffs_d_ = nullptr;\n    frequency_sigma_d_ = nullptr;\n    noise_in_time_d_ = nullptr;\n    samples_in_freq_complex_d_ = nullptr;\n    cufftDestroy(plan_);\n  }\n  PARENT_CLASS::freeCudaMem();\n}\n\nCOLORED_TEMPLATE\n__host__ void COLORED_NOISE::allocateCUDAMemoryHelper()\n{\n  PARENT_CLASS::allocateCUDAMemoryHelper();\n  if (this->GPUMemStatus_)\n  {\n    const int sample_num_timesteps = 2 * this->getNumTimesteps();\n    const int freq_size = sample_num_timesteps / 2 + 1;\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n    if (frequency_sigma_d_)\n    {\n      HANDLE_ERROR(cudaFreeAsync(frequency_sigma_d_, this->stream_));\n    }\n    if (samples_in_freq_complex_d_)\n    {\n      HANDLE_ERROR(cudaFreeAsync(samples_in_freq_complex_d_, this->stream_));\n    }\n    if (noise_in_time_d_)\n    {\n      HANDLE_ERROR(cudaFreeAsync(noise_in_time_d_, this->stream_));\n    }\n    if (freq_coeffs_d_)\n    {\n      HANDLE_ERROR(cudaFreeAsync(freq_coeffs_d_, this->stream_));\n    }\n    HANDLE_ERROR(\n        cudaMallocAsync((void**)&freq_coeffs_d_, sizeof(float) * freq_size * this->CONTROL_DIM, this->stream_));\n    HANDLE_ERROR(cudaMallocAsync((void**)&frequency_sigma_d_, sizeof(float) * this->CONTROL_DIM, this->stream_));\n    HANDLE_ERROR(cudaMallocAsync((void**)&samples_in_freq_complex_d_,\n                                 sizeof(cufftComplex) * this->getNumRollouts() * this->CONTROL_DIM * freq_size *\n                                     this->getNumDistributions(),\n                                 this->stream_));\n    HANDLE_ERROR(cudaMallocAsync((void**)&noise_in_time_d_,\n                                 sizeof(float) * this->getNumRollouts() * this->CONTROL_DIM * sample_num_timesteps *\n                                     this->getNumDistributions(),\n                                 this->stream_));\n#else\n    if (frequency_sigma_d_)\n    {\n      HANDLE_ERROR(cudaFree(frequency_sigma_d_));\n    }\n    if 
(samples_in_freq_complex_d_)\n    {\n      HANDLE_ERROR(cudaFree(samples_in_freq_complex_d_));\n    }\n    if (noise_in_time_d_)\n    {\n      HANDLE_ERROR(cudaFree(noise_in_time_d_));\n    }\n    if (freq_coeffs_d_)\n    {\n      HANDLE_ERROR(cudaFree(freq_coeffs_d_));\n    }\n    HANDLE_ERROR(cudaMalloc((void**)&freq_coeffs_d_, sizeof(float) * freq_size * this->CONTROL_DIM));\n    HANDLE_ERROR(cudaMalloc((void**)&frequency_sigma_d_, sizeof(float) * this->CONTROL_DIM));\n    HANDLE_ERROR(cudaMalloc((void**)&samples_in_freq_complex_d_, sizeof(cufftComplex) * this->getNumRollouts() *\n                                                                     this->CONTROL_DIM * freq_size *\n                                                                     this->getNumDistributions()));\n    HANDLE_ERROR(cudaMalloc((void**)&noise_in_time_d_, sizeof(float) * this->getNumRollouts() * this->CONTROL_DIM *\n                                                           sample_num_timesteps * this->getNumDistributions()));\n#endif\n    // Recreate FFT Plan\n    HANDLE_CUFFT_ERROR(cufftPlan1d(&plan_, sample_num_timesteps, CUFFT_C2R,\n                                   this->getNumRollouts() * this->getNumDistributions() * this->CONTROL_DIM));\n    HANDLE_CUFFT_ERROR(cufftSetStream(plan_, this->stream_));\n  }\n}\n\nCOLORED_TEMPLATE\n__host__ void COLORED_NOISE::generateSamples(const int& optimization_stride, const int& iteration_num,\n                                             curandGenerator_t& gen, bool synchronize)\n{\n  const int BLOCKSIZE_X = this->params_.rewrite_controls_block_dim.x;\n  const int BLOCKSIZE_Y = this->params_.rewrite_controls_block_dim.y;\n  const int BLOCKSIZE_Z = this->params_.rewrite_controls_block_dim.z;\n  const int num_trajectories = this->getNumRollouts() * this->getNumDistributions();\n\n  std::vector<float> sample_freq;\n  const int sample_num_timesteps = 2 * this->getNumTimesteps();\n  fftfreq(sample_num_timesteps, sample_freq);\n  const float 
cutoff_freq = fmaxf(this->params_.fmin, 1.0f / sample_num_timesteps);\n  const int freq_size = sample_freq.size();\n\n  int smaller_index = 0;\n  const int local_control_dim = this->CONTROL_DIM;  // Needed for methods which use pass by reference\n  Eigen::MatrixXf sample_freqs(freq_size, local_control_dim);\n\n  // Adjust the weighting of each frequency by the exponents\n  for (int i = 0; i < freq_size; i++)\n  {\n    if (sample_freq[i] < cutoff_freq)\n    {\n      smaller_index++;\n    }\n    else if (smaller_index < freq_size)\n    {\n      for (int j = 0; j < smaller_index; j++)\n      {\n        sample_freq[j] = sample_freq[smaller_index];\n        for (int k = 0; k < this->CONTROL_DIM; k++)\n        {\n          sample_freqs(j, k) = powf(sample_freq[smaller_index], -this->params_.exponents[k] / 2.0f);\n        }\n      }\n    }\n    for (int j = 0; j < this->CONTROL_DIM; j++)\n    {\n      sample_freqs(i, j) = powf(sample_freq[i], -this->params_.exponents[j] / 2.0f);\n    }\n  }\n\n  // Calculate variance\n  float sigma[this->CONTROL_DIM] = { 0 };\n  for (int i = 0; i < this->CONTROL_DIM; i++)\n  {\n    for (int j = 1; j < freq_size - 1; j++)\n    {\n      sigma[i] += SQ(sample_freqs(j, i));\n    }\n    sigma[i] += SQ(sample_freqs(freq_size - 1, i) * ((1.0f + (sample_num_timesteps % 2)) / 2.0f));\n    sigma[i] = 2.0f * sqrtf(sigma[i]) / sample_num_timesteps;\n  }\n\n  // Sample the noise in frequency domain and reutrn to time domain\n  const int batch = num_trajectories * this->CONTROL_DIM;\n  // Need 2 * (sample_num_timesteps / 2 + 1) * batch of randomly sampled values\n  HANDLE_CURAND_ERROR(curandGenerateNormal(gen, (float*)samples_in_freq_complex_d_, 2 * batch * freq_size, 0.0, 1.0));\n  HANDLE_ERROR(cudaMemcpyAsync(freq_coeffs_d_, sample_freqs.data(), sizeof(float) * freq_size * this->CONTROL_DIM,\n                               cudaMemcpyHostToDevice, this->stream_));\n  HANDLE_ERROR(cudaMemcpyAsync(frequency_sigma_d_, sigma, sizeof(float) * 
this->CONTROL_DIM, cudaMemcpyHostToDevice,\n                               this->stream_));\n  const int num_trajectories_grid_x = mppi::math::int_ceil(num_trajectories, BLOCKSIZE_X);\n  const int variance_grid_y = (freq_size - 1) / BLOCKSIZE_Y + 1;\n  const int control_grid_z = mppi::math::int_ceil(local_control_dim, BLOCKSIZE_Z);\n  dim3 grid(num_trajectories_grid_x, variance_grid_y, control_grid_z);\n  dim3 block(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);\n  configureFrequencyNoise<<<grid, block, 0, this->stream_>>>(samples_in_freq_complex_d_, freq_coeffs_d_,\n                                                             num_trajectories, this->CONTROL_DIM, freq_size);\n  HANDLE_ERROR(cudaGetLastError());\n  // freq_data needs to be batch number of num_timesteps/2 + 1 cuComplex values\n  // time_data needs to be batch * num_timesteps floats\n  HANDLE_CUFFT_ERROR(cufftExecC2R(plan_, samples_in_freq_complex_d_, noise_in_time_d_));\n\n  // Change axes ordering from [trajectories, control, time] to [trajectories, time, control]\n  const int reorder_grid_y = mppi::math::int_ceil(this->getNumTimesteps(), BLOCKSIZE_Y);\n  dim3 reorder_grid(num_trajectories_grid_x, reorder_grid_y, control_grid_z);\n  rearrangeNoise<<<reorder_grid, block, 0, this->stream_>>>(\n      noise_in_time_d_, this->control_samples_d_, frequency_sigma_d_, num_trajectories, this->getNumTimesteps(),\n      this->CONTROL_DIM, optimization_stride, this->getOffsetDecayRate());\n\n  // Rewrite pure noise into actual control samples\n  dim3 control_writing_grid;\n  control_writing_grid.x = mppi::math::int_ceil(this->getNumRollouts(), BLOCKSIZE_X);\n  control_writing_grid.y = mppi::math::int_ceil(this->getNumTimesteps(), BLOCKSIZE_Y);\n  control_writing_grid.z = mppi::math::int_ceil(this->getNumDistributions(), BLOCKSIZE_Z);\n  unsigned int std_dev_mem_size = this->getNumDistributions() * this->CONTROL_DIM;\n  // Allocate shared memory for std_deviations per timestep or constant across the trajectory\n  
std_dev_mem_size = mppi::math::nearest_multiple_4(\n      this->params_.time_specific_std_dev ? std_dev_mem_size * this->getNumTimesteps() : std_dev_mem_size);\n  unsigned int shared_mem_size =\n      std_dev_mem_size +\n      mppi::math::nearest_multiple_4(this->getNumDistributions() * this->getNumTimesteps() * this->CONTROL_DIM) +\n      mppi::math::nearest_multiple_4(BLOCKSIZE_X * BLOCKSIZE_Y * BLOCKSIZE_Z * this->CONTROL_DIM);\n  shared_mem_size *= sizeof(float);\n  setGaussianControls<<<control_writing_grid, this->params_.rewrite_controls_block_dim, shared_mem_size,\n                        this->stream_>>>(\n      this->control_means_d_, this->std_dev_d_, this->control_samples_d_, this->CONTROL_DIM, this->getNumTimesteps(),\n      this->getNumRollouts(), this->getNumDistributions(), optimization_stride,\n      powf(this->params_.std_dev_decay, iteration_num), this->params_.pure_noise_trajectories_percentage,\n      this->params_.time_specific_std_dev);\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n  }\n}\n}  // namespace sampling_distributions\n}  // namespace mppi\n#undef COLORED_TEMPLATE\n#undef COLORED_NOISE\n"
  },
  {
    "path": "include/mppi/sampling_distributions/colored_noise/colored_noise.cuh",
    "content": "#pragma once\n/**\n * Created by Bogdan, Dec 16, 2021\n * based off of https://github.com/felixpatzelt/colorednoise/blob/master/colorednoise.py\n */\n\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n#include <mppi/utils/gpu_err_chk.cuh>\n\n#include <Eigen/Dense>\n#include <cufft.h>\n#include <curand.h>\n\n#include <algorithm>\n#include <iostream>\n#include <vector>\n\n__global__ void configureFrequencyNoise(cufftComplex* noise, float* variance, int num_samples, int control_dim,\n                                        int num_freq);\n\n__global__ void rearrangeNoise(float* input, float* output, float* variance, int num_trajectories, int num_timesteps,\n                               int control_dim, int offset_t, float decay_rate = 0.0);\n\nvoid fftfreq(const int num_samples, std::vector<float>& result, const float spacing = 1)\n{\n  // result is of size floor(n/2) + 1\n  int result_size = num_samples / 2 + 1;\n  result.clear();\n  result.resize(result_size);\n  for (int i = 0; i < result_size; i++)\n  {\n    result[i] = i / (spacing * num_samples);\n  }\n}\n\nvoid powerlaw_psd_gaussian(std::vector<float>& exponents, int num_timesteps, int num_trajectories,\n                           float* control_noise_d, int offset_t, curandGenerator_t& gen, float offset_decay_rate,\n                           cudaStream_t stream = 0, float fmin = 0.0);\n\nnamespace mppi\n{\nnamespace sampling_distributions\n{\ntemplate <int C_DIM, int MAX_DISTRIBUTIONS = 2>\nstruct ColoredNoiseParamsImpl : public GaussianParamsImpl<C_DIM, MAX_DISTRIBUTIONS>\n{\n  float exponents[C_DIM * MAX_DISTRIBUTIONS] = { 0.0f };\n  float offset_decay_rate = 0.97;\n  float fmin = 0.0;\n\n  ColoredNoiseParamsImpl(int num_rollouts = 1, int num_timesteps = 1, int num_distributions = 1)\n    : GaussianParamsImpl<C_DIM, MAX_DISTRIBUTIONS>(num_rollouts, num_timesteps, num_distributions)\n  {\n  }\n\n  void copyExponentToDistribution(const int src_distribution_idx, const int 
dest_distribution_idx)\n  {\n    bool src_out_of_distribution = src_distribution_idx >= MAX_DISTRIBUTIONS;\n    if (src_out_of_distribution || dest_distribution_idx >= MAX_DISTRIBUTIONS)\n    {\n      printf(\"%s Distribution %d is out of range. There are only %d total distributions\\n\",\n             src_out_of_distribution ? \"Src\" : \"Dest\",\n             src_out_of_distribution ? src_distribution_idx : dest_distribution_idx, MAX_DISTRIBUTIONS);\n      return;\n    }\n    float* exponents_src = exponents[C_DIM * src_distribution_idx];\n    float* exponents_dest = exponents[C_DIM * dest_distribution_idx];\n    for (int i = 0; i < C_DIM; i++)\n    {\n      exponents_dest[i] = exponents_src[i];\n    }\n  }\n};\n\ntemplate <int C_DIM>\nusing ColoredNoiseParams = ColoredNoiseParamsImpl<C_DIM, 2>;\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE = ColoredNoiseParams, class DYN_PARAMS_T = DynamicsParams>\nclass ColoredNoiseDistributionImpl : public GaussianDistributionImpl<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = GaussianDistributionImpl<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>;\n  using SAMPLING_PARAMS_T = typename PARENT_CLASS::SAMPLING_PARAMS_T;\n  using control_array = typename PARENT_CLASS::control_array;\n\n  static const int CONTROL_DIM = PARENT_CLASS::CONTROL_DIM;\n\n  ColoredNoiseDistributionImpl(cudaStream_t stream = 0);\n  ColoredNoiseDistributionImpl(const SAMPLING_PARAMS_T& params, cudaStream_t stream = 0);\n\n  ~ColoredNoiseDistributionImpl()\n  {\n    freeCudaMem();\n  }\n\n  __host__ virtual std::string getSamplingDistributionName() const override\n  {\n    return \"Colored Noise\";\n  }\n\n  __host__ void allocateCUDAMemoryHelper();\n\n  __host__ void freeCudaMem();\n\n  __host__ void generateSamples(const int& optimization_stride, const int& iteration_num, curandGenerator_t& gen,\n                                bool synchronize = true);\n\n  __host__ __device__ float getOffsetDecayRate() const\n  
{\n    return this->params_.offset_decay_rate;\n  }\n\n  void setOffsetDecayRate(const float decay_rate)\n  {\n    this->params_.offset_decay_rate = decay_rate;\n  }\n\nprotected:\n  cufftHandle plan_;\n  float* frequency_sigma_d_ = nullptr;\n  float* noise_in_time_d_ = nullptr;\n  cufftComplex* samples_in_freq_complex_d_ = nullptr;\n  float* freq_coeffs_d_ = nullptr;\n};\n\ntemplate <class DYN_PARAMS_T>\nclass ColoredNoiseDistribution\n  : public ColoredNoiseDistributionImpl<ColoredNoiseDistribution<DYN_PARAMS_T>, ColoredNoiseParams, DYN_PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = ColoredNoiseDistributionImpl<ColoredNoiseDistribution, ColoredNoiseParams, DYN_PARAMS_T>;\n  using SAMPLING_PARAMS_T = typename PARENT_CLASS::SAMPLING_PARAMS_T;\n\n  ColoredNoiseDistribution(cudaStream_t stream = 0) : PARENT_CLASS(stream)\n  {\n  }\n  ColoredNoiseDistribution(const SAMPLING_PARAMS_T& params, cudaStream_t stream = 0) : PARENT_CLASS(params, stream)\n  {\n  }\n};\n\n}  // namespace sampling_distributions\n}  // namespace mppi\n\n#ifdef __CUDACC__\n#include \"colored_noise.cu\"\n#endif\n"
  },
  {
    "path": "include/mppi/sampling_distributions/gaussian/gaussian.cu",
    "content": "/**\n * Created by Bogdan Vlahov on 3/24/2023\n **/\n\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n#include <mppi/core/mppi_common.cuh>\n#include <mppi/utils/cuda_math_utils.cuh>\n#include <mppi/utils/math_utils.h>\n\nnamespace mppi\n{\nnamespace sampling_distributions\n{\n#define GAUSSIAN_TEMPLATE template <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n#define GAUSSIAN_CLASS GaussianDistributionImpl<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>\n\n__global__ void setGaussianControls(const float* __restrict__ mean_d, const float* __restrict__ std_dev_d,\n                                    float* __restrict__ control_samples_d, const int control_dim,\n                                    const int num_timesteps, const int num_rollouts, const int num_distributions,\n                                    const int optimization_stride, const float std_dev_decay,\n                                    const float pure_noise_percentage, const bool time_specific_std_dev)\n{\n  const int trajectory_index = threadIdx.x + blockDim.x * blockIdx.x;\n  const int distribution_index = threadIdx.z + blockDim.z * blockIdx.z;\n  const int time_index = threadIdx.y + blockDim.y * blockIdx.y;\n  const bool valid_index =\n      trajectory_index < num_rollouts && time_index < num_timesteps && distribution_index < num_distributions;\n  const auto& num_timesteps_block = blockDim.y;\n  const auto& num_rollouts_block = blockDim.x;\n  const int shared_noise_index = threadIdx.z * num_timesteps_block * num_rollouts_block * control_dim +\n                                 threadIdx.x * num_timesteps_block * control_dim + threadIdx.y * control_dim;\n  const int global_noise_index =\n      min(distribution_index, num_distributions) * num_timesteps * num_rollouts * control_dim +\n      min(trajectory_index, num_rollouts) * num_timesteps * control_dim + min(time_index, num_timesteps) * control_dim;\n  const int shared_mean_index = distribution_index 
* num_timesteps * control_dim + time_index * control_dim;\n  // Std Deviation setup\n  int std_dev_size = num_distributions * control_dim;\n  int shared_std_dev_index = threadIdx.z * num_timesteps * control_dim + threadIdx.y * control_dim;\n  int global_std_dev_index = min(distribution_index, num_distributions) * num_timesteps * control_dim +\n                             min(time_index, num_timesteps) * control_dim;\n  shared_std_dev_index = time_specific_std_dev ? shared_std_dev_index : 0;\n  global_std_dev_index = time_specific_std_dev ? global_std_dev_index : 0;\n  std_dev_size = time_specific_std_dev ? num_timesteps * std_dev_size : std_dev_size;\n\n  // local variables\n  int i, j, k;\n\n  // Shared memory setup\n  /**\n   * @brief Shared memory setup\n   * This kernel has three shared memory arrays, mean_shared, std_dev_shared, and control_samples_shared.\n   * In order to prevent memory alignment issues, the memory is being over-allocated to ensure that they are start on\n   * the float4 boundary mean_shared - size should be num_timesteps * num_distributions * control_dim std_dev_shared =\n   * num_distributions * control_dim if time_specific_std_dev is false std_dev_shared = num_distributions *\n   * num_timesteps * control_dim if time_specific_std_dev is true control_samples_shared = BLOCKSIZE_X * BLOCKSIZE_Y *\n   * BLOCKSIZE_Z * control_dim\n   *\n   */\n  extern __shared__ float entire_buffer[];\n  // Create memory_aligned shared memory pointers\n  float* mean_shared = entire_buffer;\n  float* std_dev_shared = &mean_shared[mppi::math::nearest_multiple_4(num_timesteps * num_distributions * control_dim)];\n  float* control_samples_shared = &std_dev_shared[mppi::math::nearest_multiple_4(std_dev_size)];\n  if (control_dim % 4 == 0)\n  {\n    // Step 1: copy means into shared memory\n    for (i = threadIdx.z; i < num_distributions; i += blockDim.z)\n    {\n      for (j = threadIdx.y; j < num_timesteps; j += blockDim.y)\n      {\n        const int mean_index 
= (i * num_timesteps + j) * control_dim;\n        float4* mean_shared4 = reinterpret_cast<float4*>(&mean_shared[mean_index]);\n        const float4* mean_d4 = reinterpret_cast<const float4*>(&mean_d[mean_index]);\n        for (k = threadIdx.x; k < control_dim / 4; k += blockDim.x)\n        {\n          mean_shared4[k] = mean_d4[k];\n        }\n      }\n    }\n\n    // Step 2: load std_dev to shared memory\n    const float4* std_dev_d4 = reinterpret_cast<const float4*>(&std_dev_d[global_std_dev_index]);\n    float4* std_dev_shared4 = reinterpret_cast<float4*>(&std_dev_shared[shared_std_dev_index]);\n    for (i = threadIdx.x; i < control_dim / 4; i += blockDim.x)\n    {\n      std_dev_shared4[i] = std_dev_decay * std_dev_d4[i];\n    }\n\n    // Step 3: load noise into shared memory\n    float4* control_samples_shared4 = reinterpret_cast<float4*>(&control_samples_shared[shared_noise_index]);\n    float4* control_samples_d4 = reinterpret_cast<float4*>(&control_samples_d[global_noise_index]);\n    // Create const pointre to mean in shared memory as it shouldn't change henceforth\n    const float4* mean_shared4 = reinterpret_cast<const float4*>(&mean_shared[shared_mean_index]);\n    for (i = 0; valid_index && i < control_dim / 4; i++)\n    {\n      control_samples_shared4[i] = control_samples_d4[i];\n    }\n\n    __syncthreads();  // wait for all copying from global to shared memory to finish\n    // Step 4: do mean + variance calculations\n    if (valid_index && (trajectory_index == 0 || time_index < optimization_stride))\n    {  // 0 noise trajectory\n      for (i = 0; i < control_dim / 4; i++)\n      {\n        control_samples_shared4[i] = mean_shared4[i];\n      }\n    }\n    else if (valid_index && trajectory_index >= (1.0f - pure_noise_percentage) * num_rollouts)\n    {  // doing zero mean trajectories\n      for (i = 0; i < control_dim / 4; i++)\n      {\n        control_samples_shared4[i] = std_dev_shared4[i] * control_samples_shared4[i];\n      }\n    }\n    
else if (valid_index)\n    {\n      for (i = 0; i < control_dim / 4; i++)\n      {\n        control_samples_shared4[i] = mean_shared4[i] + std_dev_shared4[i] * control_samples_shared4[i];\n      }\n    }\n\n    // save back to global memory\n    for (i = 0; valid_index && i < control_dim / 4; i++)\n    {\n      control_samples_d4[i] = control_samples_shared4[i];\n    }\n  }\n  else if (control_dim % 2 == 0)\n  {\n    // Step 1: copy means into shared memory\n    for (i = threadIdx.z; i < num_distributions; i += blockDim.z)\n    {\n      for (j = threadIdx.y; j < num_timesteps; j += blockDim.y)\n      {\n        const int mean_index = (i * num_timesteps + j) * control_dim;\n        float2* mean_shared2 = reinterpret_cast<float2*>(&mean_shared[mean_index]);\n        const float2* mean_d2 = reinterpret_cast<const float2*>(&mean_d[mean_index]);\n        for (k = threadIdx.x; k < control_dim / 2; k += blockDim.x)\n        {\n          mean_shared2[k] = mean_d2[k];\n        }\n      }\n    }\n\n    // Step 2: load std_dev to shared memory\n    const float2* std_dev_d2 = reinterpret_cast<const float2*>(&std_dev_d[global_std_dev_index]);\n    float2* std_dev_shared2 = reinterpret_cast<float2*>(&std_dev_shared[shared_std_dev_index]);\n    for (i = threadIdx.x; i < control_dim / 2; i += blockDim.x)\n    {\n      std_dev_shared2[i] = std_dev_decay * std_dev_d2[i];\n    }\n\n    // Step 3: load noise into shared memory\n    float2* control_samples_shared2 = reinterpret_cast<float2*>(&control_samples_shared[shared_noise_index]);\n    float2* control_samples_d2 = reinterpret_cast<float2*>(&control_samples_d[global_noise_index]);\n    // Create const pointer to mean in shared memory as it shouldn't change henceforth\n    const float2* mean_shared2 = reinterpret_cast<const float2*>(&mean_shared[shared_mean_index]);\n    for (i = 0; valid_index && i < control_dim / 2; i++)\n    {\n      control_samples_shared2[i] = control_samples_d2[i];\n    }\n\n    __syncthreads();  // wait for 
all copying from global to shared memory to finish\n    // Step 4: do mean + variance calculations\n    if (valid_index && (trajectory_index == 0 || time_index < optimization_stride))\n    {  // 0 noise trajectory\n      for (i = 0; i < control_dim / 2; i++)\n      {\n        control_samples_shared2[i] = mean_shared2[i];\n      }\n    }\n    else if (valid_index && trajectory_index >= (1.0f - pure_noise_percentage) * num_rollouts)\n    {  // doing zero mean trajectories\n      for (i = 0; i < control_dim / 2; i++)\n      {\n        control_samples_shared2[i] = std_dev_shared2[i] * control_samples_shared2[i];\n      }\n    }\n    else if (valid_index)\n    {\n      for (i = 0; i < control_dim / 2; i++)\n      {\n        control_samples_shared2[i] = mean_shared2[i] + std_dev_shared2[i] * control_samples_shared2[i];\n      }\n    }\n\n    // save back to global memory\n    for (i = 0; valid_index && i < control_dim / 2; i++)\n    {\n      control_samples_d2[i] = control_samples_shared2[i];\n    }\n  }\n  else\n  {  // No memory alignment to take advantage of\n    // Step 1: copy means into shared memory\n    for (i = threadIdx.z; i < num_distributions; i += blockDim.z)\n    {\n      for (j = threadIdx.y; j < num_timesteps; j += blockDim.y)\n      {\n        const int mean_index = (i * num_timesteps + j) * control_dim;\n        for (k = threadIdx.x; k < control_dim; k += blockDim.x)\n        {\n          mean_shared[mean_index + k] = mean_d[mean_index + k];\n        }\n      }\n    }\n    // __syncthreads();\n    // if (blockIdx.x == 0 && blockIdx.y == 0 && threadIdx.x == 0 && threadIdx.y == 0)\n    // {\n    //   printf(\"Mean: \");\n    //   for (i = 0; i < num_timesteps; i++)\n    //   {\n    //     printf(\"%f, \", mean_d[i]);\n    //   }\n    //   printf(\"\\n\");\n    // }\n    // __syncthreads();\n\n    // Step 2: load std_dev to shared memory\n    for (i = threadIdx.x; i < control_dim; i += blockDim.x)\n    {\n      std_dev_shared[shared_std_dev_index + i] = 
std_dev_decay * std_dev_d[global_std_dev_index + i];\n    }\n\n    // Step 3: load noise into shared memory\n    for (i = 0; valid_index && i < control_dim; i++)\n    {\n      control_samples_shared[shared_noise_index + i] = control_samples_d[global_noise_index + i];\n    }\n    // __syncthreads();\n    // if (trajectory_index == 10 && time_index == 20)\n    // {\n    //   printf(\"Control noise %d at time %d: \", trajectory_index, time_index);\n    //   for (i = 0; i < control_dim; i++)\n    //   {\n    //     printf(\"%f, \", control_samples_shared[shared_noise_index + i]);\n    //   }\n    //   printf(\"\\n std_dev_decay: %f, optimization_stride: %d\\n\", std_dev_decay, optimization_stride);\n    //   printf(\"Std Dev: %f\\n\", std_dev_shared[shared_std_dev_index]);\n    // }\n    // __syncthreads();\n\n    __syncthreads();  // wait for all copying from global to shared memory to finish\n    // Step 4: do mean + variance calculations\n    if (valid_index && (trajectory_index == 0 || time_index < optimization_stride))\n    {  // 0 noise trajectory\n      for (i = 0; i < control_dim; i++)\n      {\n        control_samples_shared[shared_noise_index + i] = mean_shared[shared_mean_index + i];\n      }\n    }\n    else if (valid_index && trajectory_index >= (1.0f - pure_noise_percentage) * num_rollouts)\n    {  // doing zero mean trajectories\n      for (i = 0; i < control_dim; i++)\n      {\n        control_samples_shared[shared_noise_index + i] =\n            std_dev_shared[shared_std_dev_index + i] * control_samples_shared[shared_noise_index + i];\n      }\n    }\n    else if (valid_index)\n    {\n      for (i = 0; i < control_dim; i++)\n      {\n        control_samples_shared[shared_noise_index + i] =\n            mean_shared[shared_mean_index + i] +\n            std_dev_shared[shared_std_dev_index + i] * control_samples_shared[shared_noise_index + i];\n      }\n    }\n    __syncthreads();\n    // save back to global memory\n    for (i = 0; valid_index && i < 
control_dim; i++)\n    {\n      control_samples_d[global_noise_index + i] = control_samples_shared[shared_noise_index + i];\n    }\n  }\n}\n\nGAUSSIAN_TEMPLATE\nGAUSSIAN_CLASS::GaussianDistributionImpl(cudaStream_t stream) : PARENT_CLASS::SamplingDistribution(stream)\n{\n}\n\nGAUSSIAN_TEMPLATE\nGAUSSIAN_CLASS::GaussianDistributionImpl(const SAMPLING_PARAMS_T& params, cudaStream_t stream)\n  : PARENT_CLASS::SamplingDistribution(params, stream)\n{\n}\n\nGAUSSIAN_TEMPLATE\n__host__ void GAUSSIAN_CLASS::allocateCUDAMemoryHelper()\n{\n  if (this->GPUMemStatus_)\n  {\n    if (std_dev_d_)\n    {\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n      HANDLE_ERROR(cudaFreeAsync(std_dev_d_, this->stream_));\n#else\n      HANDLE_ERROR(cudaFree(std_dev_d_));\n#endif\n    }\n    if (control_means_d_)\n    {  // deallocate previous memory control trajectory means\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n      HANDLE_ERROR(cudaFreeAsync(control_means_d_, this->stream_));\n#else\n      HANDLE_ERROR(cudaFree(control_means_d_));\n#endif\n    }\n\n    int std_dev_size = CONTROL_DIM * this->getNumDistributions();\n    if (this->params_.time_specific_std_dev)\n    {\n      std_dev_size *= this->getNumTimesteps();\n    }\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n    HANDLE_ERROR(cudaMallocAsync((void**)&std_dev_d_, sizeof(float) * std_dev_size, this->stream_));\n    HANDLE_ERROR(cudaMallocAsync((void**)&control_means_d_,\n                                 sizeof(float) * this->getNumDistributions() * this->getNumTimesteps() * CONTROL_DIM,\n                                 this->stream_));\n#else\n    HANDLE_ERROR(cudaMalloc((void**)&std_dev_d_, sizeof(float) * std_dev_size));\n    HANDLE_ERROR(cudaMalloc((void**)&control_means_d_,\n                            sizeof(float) * this->getNumDistributions() * this->getNumTimesteps() * CONTROL_DIM));\n#endif\n    means_.resize(this->getNumDistributions() * this->getNumTimesteps() * CONTROL_DIM);\n    // 
Ensure that the device side point knows where the the standard deviation memory is located\n    HANDLE_ERROR(cudaMemcpyAsync(&this->sampling_d_->std_dev_d_, &std_dev_d_, sizeof(float*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(&this->sampling_d_->control_means_d_, &control_means_d_, sizeof(float*),\n                                 cudaMemcpyHostToDevice, this->stream_));\n  }\n}\n\nGAUSSIAN_TEMPLATE\n__host__ void GAUSSIAN_CLASS::freeCudaMem()\n{\n  if (this->GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaFree(control_means_d_));\n    HANDLE_ERROR(cudaFree(std_dev_d_));\n    control_means_d_ = nullptr;\n    std_dev_d_ = nullptr;\n  }\n  PARENT_CLASS::freeCudaMem();\n}\n\nGAUSSIAN_TEMPLATE\nvoid GAUSSIAN_CLASS::paramsToDevice(bool synchronize)\n{\n  PARENT_CLASS::paramsToDevice(false);\n  if (this->GPUMemStatus_)\n  {\n    if (this->params_.time_specific_std_dev)\n    {\n      HANDLE_ERROR(cudaMemcpyAsync(this->std_dev_d_, this->params_.std_dev,\n                                   sizeof(float) * CONTROL_DIM * this->getNumTimesteps() * this->getNumDistributions(),\n                                   cudaMemcpyHostToDevice, this->stream_));\n    }\n    else\n    {\n      HANDLE_ERROR(cudaMemcpyAsync(this->std_dev_d_, this->params_.std_dev,\n                                   sizeof(float) * CONTROL_DIM * this->getNumDistributions(), cudaMemcpyHostToDevice,\n                                   this->stream_));\n    }\n    if (synchronize)\n    {\n      HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n    }\n  }\n}\n\nGAUSSIAN_TEMPLATE\n__host__ void GAUSSIAN_CLASS::generateSamples(const int& optimization_stride, const int& iteration_num,\n                                              curandGenerator_t& gen, bool synchronize)\n{\n  if (this->params_.use_same_noise_for_all_distributions)\n  {\n    HANDLE_CURAND_ERROR(curandGenerateNormal(\n        gen, this->control_samples_d_, this->getNumTimesteps() * 
this->getNumRollouts() * CONTROL_DIM, 0.0f, 1.0f));\n    for (int i = 1; i < this->getNumDistributions(); i++)\n    {\n      HANDLE_ERROR(cudaMemcpyAsync(\n          &this->control_samples_d_[this->getNumRollouts() * this->getNumTimesteps() * CONTROL_DIM * i],\n          this->control_samples_d_, sizeof(float) * this->getNumRollouts() * this->getNumTimesteps() * CONTROL_DIM,\n          cudaMemcpyDeviceToDevice, this->stream_));\n    }\n  }\n  else\n  {\n    HANDLE_CURAND_ERROR(curandGenerateNormal(\n        gen, this->control_samples_d_,\n        this->getNumTimesteps() * this->getNumRollouts() * this->getNumDistributions() * CONTROL_DIM, 0.0f, 1.0f));\n  }\n  const int BLOCKSIZE_X = this->params_.rewrite_controls_block_dim.x;\n  const int BLOCKSIZE_Y = this->params_.rewrite_controls_block_dim.y;\n  const int BLOCKSIZE_Z = this->params_.rewrite_controls_block_dim.z;\n  /**\n   * Generate noise samples with mean added\n   **/\n  dim3 control_writing_grid;\n  control_writing_grid.x = mppi::math::int_ceil(this->getNumRollouts(), BLOCKSIZE_X);\n  control_writing_grid.y = mppi::math::int_ceil(this->getNumTimesteps(), BLOCKSIZE_Y);\n  control_writing_grid.z = mppi::math::int_ceil(this->getNumDistributions(), BLOCKSIZE_Z);\n  unsigned int std_dev_mem_size = this->getNumDistributions() * CONTROL_DIM;\n  // Allocate shared memory for std_deviations per timestep or constant across the trajectory\n  std_dev_mem_size = mppi::math::nearest_multiple_4(\n      this->params_.time_specific_std_dev ? std_dev_mem_size * this->getNumTimesteps() : std_dev_mem_size);\n  unsigned int shared_mem_size =\n      std_dev_mem_size +\n      mppi::math::nearest_multiple_4(this->getNumDistributions() * this->getNumTimesteps() * CONTROL_DIM) +\n      mppi::math::nearest_multiple_4(BLOCKSIZE_X * BLOCKSIZE_Y * BLOCKSIZE_Z * CONTROL_DIM);\n  shared_mem_size *= sizeof(float);\n  // std::cout << \"Shared mem size: \" << shared_mem_size << \" bytes. 
BLOCKSIZE_X: \" << BLOCKSIZE_X\n  //           << \", BLOCKSIZE_Y: \" << BLOCKSIZE_Y << \", BLOCKSIZE_Z: \" << BLOCKSIZE_Z\n  //           << \"Grid: (\" << control_writing_grid.x << \", \" << control_writing_grid.y\n  //           << \", \" << control_writing_grid.z << \")\" << std::endl;\n  setGaussianControls<<<control_writing_grid, this->params_.rewrite_controls_block_dim, shared_mem_size,\n                        this->stream_>>>(\n      this->control_means_d_, this->std_dev_d_, this->control_samples_d_, CONTROL_DIM, this->getNumTimesteps(),\n      this->getNumRollouts(), this->getNumDistributions(), optimization_stride,\n      powf(this->params_.std_dev_decay, iteration_num), this->params_.pure_noise_trajectories_percentage,\n      this->params_.time_specific_std_dev);\n\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n  }\n}\n\nGAUSSIAN_TEMPLATE\n__host__ void GAUSSIAN_CLASS::updateDistributionParamsFromDevice(const float* trajectory_weights_d, float normalizer,\n                                                                 const int& distribution_i, bool synchronize)\n{\n  if (distribution_i >= this->getNumDistributions())\n  {\n    this->logger_->error(\n        \"Updating distributional params for distribution %d out of %d total. 
Distribution out of bounds.\\n\",\n        distribution_i, this->getNumDistributions());\n    return;\n  }\n  float* control_samples_i_d =\n      &(this->control_samples_d_[distribution_i * this->getNumRollouts() * this->getNumTimesteps() * CONTROL_DIM]);\n  float* control_mean_i_d = &(this->control_means_d_[distribution_i * this->getNumTimesteps() * CONTROL_DIM]);\n  mppi::kernels::launchWeightedReductionKernel<CONTROL_DIM>(trajectory_weights_d, control_samples_i_d, control_mean_i_d,\n                                                            normalizer, this->getNumTimesteps(), this->getNumRollouts(),\n                                                            this->params_.sum_strides, this->stream_, synchronize);\n  HANDLE_ERROR(cudaMemcpyAsync(&means_[distribution_i * this->getNumTimesteps() * CONTROL_DIM], control_mean_i_d,\n                               sizeof(float) * this->getNumTimesteps() * CONTROL_DIM, cudaMemcpyDeviceToHost,\n                               this->stream_));\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n  }\n}\n\nGAUSSIAN_TEMPLATE\n__host__ void GAUSSIAN_CLASS::setHostOptimalControlSequence(float* optimal_control_trajectory,\n                                                            const int& distribution_i, bool synchronize)\n{\n  if (distribution_i >= this->getNumDistributions())\n  {\n    this->logger_->error(\n        \"Asking for optimal control sequence from distribution %d out of %d total. 
Distribution out of bounds.\\n\",\n        distribution_i, this->getNumDistributions());\n    return;\n  }\n\n  HANDLE_ERROR(cudaMemcpyAsync(\n      optimal_control_trajectory, &(this->control_means_d_[this->getNumTimesteps() * CONTROL_DIM * distribution_i]),\n      sizeof(float) * this->getNumTimesteps() * CONTROL_DIM, cudaMemcpyDeviceToHost, this->stream_));\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n  }\n}\n\nGAUSSIAN_TEMPLATE\n__host__ __device__ float GAUSSIAN_CLASS::computeLikelihoodRatioCost(const float* __restrict__ u,\n                                                                     float* __restrict__ theta_d,\n                                                                     const int sample_index, const int t,\n                                                                     const int distribution_idx, const float lambda,\n                                                                     const float alpha)\n{\n  SAMPLING_PARAMS_T* params_p = (SAMPLING_PARAMS_T*)theta_d;\n  const int distribution_i = distribution_idx >= params_p->num_distributions ? 
0 : distribution_idx;\n  float* std_dev = &(params_p->std_dev[CONTROL_DIM * distribution_i]);\n  if (params_p->time_specific_std_dev)\n  {\n    std_dev = &(params_p->std_dev[(distribution_i * params_p->num_timesteps + t) * CONTROL_DIM]);\n  }\n  float* mean = &(this->control_means_d_[(params_p->num_timesteps * distribution_i + t) * CONTROL_DIM]);\n  float* control_cost_coeff = params_p->control_cost_coeff;\n\n  float cost = 0.0f;\n  int i, step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(i, step);\n\n  if (CONTROL_DIM % 4 == 0)\n  {\n    float4 cost_i = make_float4(0.0f, 0.0f, 0.0f, 0.0f);\n    float4 mean_i, std_dev_i, u_i, control_cost_coeff_i;\n    for (; i < CONTROL_DIM / 4; i += step)\n    {\n      if (sample_index >= (1.0f - params_p->pure_noise_trajectories_percentage) * params_p->num_rollouts)\n      {\n        mean_i = make_float4(0.0f, 0.0f, 0.0f, 0.0f);\n      }\n      else\n      {\n        mean_i = reinterpret_cast<float4*>(mean)[i];  // read mean value from global memory only once\n      }\n      std_dev_i = reinterpret_cast<float4*>(std_dev)[i];\n      u_i = reinterpret_cast<const float4*>(u)[i];\n      control_cost_coeff_i = reinterpret_cast<float4*>(control_cost_coeff)[i];\n      // cost_i += control_cost_coeff_i * mean_i * (mean_i + 2 * (u_i - mean_i)) / (std_dev_i * std_dev_i);\n      cost_i += control_cost_coeff_i * mean_i * (mean_i - 2.0f * u_i) / (std_dev_i * std_dev_i);  // Proper way\n    }\n    cost += cost_i.x + cost_i.y + cost_i.z + cost_i.w;\n  }\n  else if (CONTROL_DIM % 2 == 0)\n  {\n    float2 cost_i = make_float2(0.0f, 0.0f);\n    float2 mean_i, std_dev_i, u_i, control_cost_coeff_i;\n    for (; i < CONTROL_DIM / 2; i += step)\n    {\n      if (sample_index >= (1.0f - params_p->pure_noise_trajectories_percentage) * params_p->num_rollouts)\n      {\n        mean_i = make_float2(0.0f, 0.0f);\n      }\n      else\n      {\n        mean_i = reinterpret_cast<float2*>(mean)[i];  // read mean value from global memory 
only once\n      }\n      std_dev_i = reinterpret_cast<float2*>(std_dev)[i];\n      u_i = reinterpret_cast<const float2*>(u)[i];\n      control_cost_coeff_i = reinterpret_cast<float2*>(control_cost_coeff)[i];\n      // cost_i += control_cost_coeff_i * mean_i * (mean_i + 2 * (u_i - mean_i)) / (std_dev_i * std_dev_i);\n      cost_i += control_cost_coeff_i * mean_i * (mean_i - 2.0f * u_i) / (std_dev_i * std_dev_i);  // Proper way\n    }\n    cost += cost_i.x + cost_i.y;\n  }\n  else\n  {\n    float mean_i;\n    for (; i < CONTROL_DIM; i += step)\n    {\n      if (sample_index >= (1.0f - params_p->pure_noise_trajectories_percentage) * params_p->num_rollouts)\n      {\n        mean_i = 0.0f;\n      }\n      else\n      {\n        mean_i = mean[i];  // read mean value from global memory only once\n      }\n      cost += control_cost_coeff[i] * mean_i * (mean_i - 2.0f * u[i]) / (std_dev[i] * std_dev[i]);  // Proper way\n      // float noise = u[i] - mean_i;\n      // cost += control_cost_coeff[i] * mean_i * (u[i] + noise) / (std_dev[i] * std_dev[i]); // Way in cost kernel\n    }\n  }\n  return 0.5f * lambda * (1.0f - alpha) * cost;\n}\n\nGAUSSIAN_TEMPLATE\n__host__ __device__ float GAUSSIAN_CLASS::computeFeedbackCost(const float* __restrict__ u_fb,\n                                                              float* __restrict__ theta_d, const int t,\n                                                              const int distribution_idx, const float lambda,\n                                                              const float alpha)\n{\n  SAMPLING_PARAMS_T* params_p = (SAMPLING_PARAMS_T*)theta_d;\n  const int distribution_i = distribution_idx >= params_p->num_distributions ? 
0 : distribution_idx;\n  float* std_dev = &(params_p->std_dev[CONTROL_DIM * distribution_i]);\n  if (params_p->time_specific_std_dev)\n  {\n    std_dev = &(params_p->std_dev[(distribution_i * params_p->num_timesteps + t) * CONTROL_DIM]);\n  }\n  float* control_cost_coeff = params_p->control_cost_coeff;\n\n  float cost = 0.0f;\n  int i, step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(i, step);\n\n  if (CONTROL_DIM % 4 == 0)\n  {\n    float4 cost_i = make_float4(0.0f, 0.0f, 0.0f, 0.0f);\n    float4 std_dev_i, control_cost_coeff_i, u_fb_i;\n    for (; i < CONTROL_DIM / 4; i += step)\n    {\n      std_dev_i = reinterpret_cast<float4*>(std_dev)[i];\n      u_fb_i = reinterpret_cast<const float4*>(u_fb)[i];\n      control_cost_coeff_i = reinterpret_cast<float4*>(control_cost_coeff)[i];\n      cost_i += control_cost_coeff_i * (u_fb_i * u_fb_i) / (std_dev_i * std_dev_i);\n    }\n    cost += cost_i.x + cost_i.y + cost_i.z + cost_i.w;\n  }\n  else if (CONTROL_DIM % 2 == 0)\n  {\n    float2 cost_i = make_float2(0.0f, 0.0f);\n    float2 std_dev_i, control_cost_coeff_i, u_fb_i;\n    for (; i < CONTROL_DIM / 2; i += step)\n    {\n      std_dev_i = reinterpret_cast<float2*>(std_dev)[i];\n      control_cost_coeff_i = reinterpret_cast<float2*>(control_cost_coeff)[i];\n      u_fb_i = reinterpret_cast<const float2*>(u_fb)[i];\n      cost_i += control_cost_coeff_i * (u_fb_i * u_fb_i) / (std_dev_i * std_dev_i);\n    }\n    cost += cost_i.x + cost_i.y;\n  }\n  else\n  {\n    for (; i < CONTROL_DIM; i += step)\n    {\n      cost += control_cost_coeff[i] * (u_fb[i] * u_fb[i]) / (std_dev[i] * std_dev[i]);\n    }\n  }\n  return 0.5f * lambda * (1.0f - alpha) * cost;\n}\n\nGAUSSIAN_TEMPLATE\n__host__ float GAUSSIAN_CLASS::computeFeedbackCost(const Eigen::Ref<const control_array>& u_fb, const int t,\n                                                   const int distribution_idx, const float lambda, const float alpha)\n{\n  float cost = 0.0f;\n  const int distribution_i = 
distribution_idx >= this->params_.num_distributions ? 0 : distribution_idx;\n  float* std_dev = &(this->params_.std_dev[CONTROL_DIM * distribution_i]);\n  if (this->params_.time_specific_std_dev)\n  {\n    std_dev = &(this->params_.std_dev[(distribution_i * this->getNumTimesteps() + t) * CONTROL_DIM]);\n  }\n  for (int i = 0; i < CONTROL_DIM; i++)\n  {\n    cost += this->params_.control_cost_coeff[i] * u_fb(i) * u_fb(i) / (std_dev[i] * std_dev[i]);\n  }\n  return 0.5f * lambda * (1.0f - alpha) * cost;\n}\n\nGAUSSIAN_TEMPLATE\n__host__ float GAUSSIAN_CLASS::computeLikelihoodRatioCost(const Eigen::Ref<const control_array>& u,\n                                                          const int sample_index, const int t,\n                                                          const int distribution_idx, const float lambda,\n                                                          const float alpha)\n{\n  float cost = 0.0f;\n  const int distribution_i = distribution_idx >= this->params_.num_distributions ? 
0 : distribution_idx;\n  const int mean_index = (distribution_i * this->getNumTimesteps() + t) * CONTROL_DIM;\n  float* mean = &(this->means_[mean_index]);\n  float* std_dev = &(this->params_.std_dev[CONTROL_DIM * distribution_i]);\n  control_array zero_mean = control_array::Zero();\n  if (sample_index >= (1.0f - this->params_.pure_noise_trajectories_percentage) * this->params_.num_rollouts)\n  {\n    mean = zero_mean.data();\n  }\n  if (this->params_.time_specific_std_dev)\n  {\n    std_dev = &(this->params_.std_dev[(distribution_i * this->getNumTimesteps() + t) * CONTROL_DIM]);\n  }\n  for (int i = 0; i < CONTROL_DIM; i++)\n  {\n    cost += this->params_.control_cost_coeff[i] * mean[i] * (mean[i] - 2.0f * u(i)) /\n            (std_dev[i] * std_dev[i]);  // Proper way\n  }\n  return 0.5f * lambda * (1.0f - alpha) * cost;\n}\n\nGAUSSIAN_TEMPLATE\n__host__ void GAUSSIAN_CLASS::copyImportanceSamplerToDevice(const float* importance_sampler,\n                                                            const int& distribution_idx, bool synchronize)\n{\n  HANDLE_ERROR(cudaMemcpyAsync(&control_means_d_[this->getNumTimesteps() * CONTROL_DIM * distribution_idx],\n                               importance_sampler, sizeof(float) * this->getNumTimesteps() * CONTROL_DIM,\n                               cudaMemcpyHostToDevice, this->stream_));\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n  }\n}\n#undef GAUSSIAN_TEMPLATE\n#undef GAUSSIAN_CLASS\n\n}  // namespace sampling_distributions\n}  // namespace mppi\n"
  },
  {
    "path": "include/mppi/sampling_distributions/gaussian/gaussian.cuh",
    "content": "#pragma once\n/**\n * Created by Bogdan Vlahov on 3/24/2023\n **/\n\n#include <mppi/sampling_distributions/sampling_distribution.cuh>\n\n#include <vector>\n\nnamespace mppi\n{\nnamespace sampling_distributions\n{\n__global__ void setGaussianControls(const float* __restrict__ mean_d, const float* __restrict__ std_dev_d,\n                                    float* __restrict__ control_samples_d, const int control_dim,\n                                    const int num_timesteps, const int num_rollouts, const int num_distributions,\n                                    const int optimization_stride, const float std_dev_decay,\n                                    const float pure_noise_percentage, const bool time_specific_std_dev = false);\n\n// Set the default number of distributions to 2 since that is currently the most we would use\ntemplate <int C_DIM, int MAX_DISTRIBUTIONS_T = 2>\nstruct GaussianParamsImpl : public SamplingParams<C_DIM>\n{\n  static const int MAX_DISTRIBUTIONS = MAX_DISTRIBUTIONS_T;\n  float std_dev[C_DIM * MAX_DISTRIBUTIONS] MPPI_ALIGN(sizeof(float4)) = { 0.0f };\n  float control_cost_coeff[C_DIM] MPPI_ALIGN(sizeof(float4)) = { 0.0f };\n  float pure_noise_trajectories_percentage = 0.01f;\n  float std_dev_decay = 1.0f;\n  // Kernel launching params\n  dim3 rewrite_controls_block_dim = dim3(32, 16, 1);\n  int sum_strides = 32;\n  // Various flags\n  bool time_specific_std_dev = false;\n\n  GaussianParamsImpl(int num_rollouts = 1, int num_timesteps = 1, int num_distributions = 1)\n    : SamplingParams<C_DIM>::SamplingParams(num_rollouts, num_timesteps, num_distributions)\n  {\n    for (int i = 0; i < this->CONTROL_DIM * MAX_DISTRIBUTIONS; i++)\n    {\n      std_dev[i] = 1.0f;\n    }\n  }\n\n  void copyStdDevToDistribution(const int src_distribution_idx, const int dest_distribution_idx)\n  {\n    bool src_out_of_distribution = src_distribution_idx >= MAX_DISTRIBUTIONS;\n    if (src_out_of_distribution || dest_distribution_idx >= 
MAX_DISTRIBUTIONS)\n    {\n      printf(\"%s Distribution %d is out of range. There are only %d total distributions\\n\",\n             src_out_of_distribution ? \"Src\" : \"Dest\",\n             src_out_of_distribution ? src_distribution_idx : dest_distribution_idx, MAX_DISTRIBUTIONS);\n      return;\n    }\n    float* std_dev_src = &std_dev[this->CONTROL_DIM * src_distribution_idx];\n    float* std_dev_dest = &std_dev[this->CONTROL_DIM * dest_distribution_idx];\n    for (int i = 0; i < this->CONTROL_DIM; i++)\n    {\n      std_dev_dest[i] = std_dev_src[i];\n    }\n  }\n};\n\ntemplate <int C_DIM>\nusing GaussianParams = GaussianParamsImpl<C_DIM, 2>;\n\ntemplate <int C_DIM, int MAX_TIMESTEPS = 1, int MAX_DISTRIBUTIONS_T = 2>\nstruct GaussianTimeVaryingStdDevParams : public GaussianParamsImpl<C_DIM, MAX_DISTRIBUTIONS_T>\n{\n  float std_dev[C_DIM * MAX_TIMESTEPS * MAX_DISTRIBUTIONS_T] = { 0.0f };\n  GaussianTimeVaryingStdDevParams(int num_rollouts = 1, int num_timesteps = 1, int num_distributions = 1)\n    : GaussianParamsImpl<C_DIM, MAX_DISTRIBUTIONS_T>::GaussianParamsImpl(num_rollouts, num_timesteps, num_distributions)\n  {\n    this->time_specific_std_dev = true;\n    for (int i = 0; i < this->CONTROL_DIM * MAX_TIMESTEPS * this->MAX_DISTRIBUTIONS; i++)\n    {\n      std_dev[i] = 1.0f;\n    }\n  }\n\n  void copyStdDevToDistribution(const int src_distribution_idx, const int dest_distribution_idx)\n  {\n    bool src_out_of_distribution = src_distribution_idx >= this->MAX_DISTRIBUTIONS;\n    if (src_out_of_distribution || dest_distribution_idx >= this->MAX_DISTRIBUTIONS)\n    {\n      printf(\"%s Distribution %d is out of range. There are only %d total distributions\\n\",\n             src_out_of_distribution ? \"Src\" : \"Dest\",\n             src_out_of_distribution ? 
src_distribution_idx : dest_distribution_idx, this->MAX_DISTRIBUTIONS);\n      return;\n    }\n    float* std_dev_src = &std_dev[this->CONTROL_DIM * this->num_timesteps * src_distribution_idx];\n    float* std_dev_dest = &std_dev[this->CONTROL_DIM * this->num_timesteps * dest_distribution_idx];\n    for (int i = 0; i < this->CONTROL_DIM * this->num_timesteps; i++)\n    {\n      std_dev_dest[i] = std_dev_src[i];\n    }\n  }\n};\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE = GaussianParams, class DYN_PARAMS_T = DynamicsParams>\nclass GaussianDistributionImpl : public SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>;\n  using SAMPLING_PARAMS_T = typename PARENT_CLASS::SAMPLING_PARAMS_T;\n  using control_array = typename PARENT_CLASS::control_array;\n  static const int CONTROL_DIM = PARENT_CLASS::CONTROL_DIM;\n  typedef Eigen::Matrix<float, CONTROL_DIM, CONTROL_DIM> TEST_TYPE;\n\n  GaussianDistributionImpl(cudaStream_t stream = 0);\n  GaussianDistributionImpl(const SAMPLING_PARAMS_T& params, cudaStream_t stream = 0);\n\n  ~GaussianDistributionImpl()\n  {\n    freeCudaMem();\n  }\n\n  __host__ virtual std::string getSamplingDistributionName() const override\n  {\n    return \"Gaussian\";\n  }\n\n  __host__ void allocateCUDAMemoryHelper();\n\n  __host__ __device__ float computeFeedbackCost(const float* __restrict__ u_fb, float* __restrict__ theta_d,\n                                                const int t, const int distribution_idx, const float lambda = 1.0,\n                                                const float alpha = 0.0);\n\n  __host__ float computeFeedbackCost(const Eigen::Ref<const control_array>& u_fb, const int t,\n                                     const int distribution_idx, const float lambda = 1.0, const float alpha = 0.0);\n\n  __host__ __device__ float computeLikelihoodRatioCost(const float* __restrict__ u, float* 
__restrict__ theta_d,\n                                                       const int sample_index, const int t, const int distribution_idx,\n                                                       const float lambda = 1.0, const float alpha = 0.0);\n\n  __host__ float computeLikelihoodRatioCost(const Eigen::Ref<const control_array>& u, const int sample_index,\n                                            const int t, const int distribution_idx, const float lambda = 1.0,\n                                            const float alpha = 0.0);\n\n  __host__ void copyImportanceSamplerToDevice(const float* importance_sampler, const int& distribution_idx,\n                                              bool synchronize = true);\n\n  __host__ void freeCudaMem();\n\n  __host__ void generateSamples(const int& optimization_stride, const int& iteration_num, curandGenerator_t& gen,\n                                bool synchronize = true);\n\n  __host__ void paramsToDevice(bool synchronize = true);\n\n  __host__ void setHostOptimalControlSequence(float* optimal_control_trajectory, const int& distribution_idx,\n                                              bool synchronize = true);\n\n  __host__ void setNumDistributions(const int num_distributions, bool synchronize = false)\n  {\n    if (num_distributions > SAMPLING_PARAMS_T::MAX_DISTRIBUTIONS)\n    {\n      this->logger_->error(\"GaussianParams can't handle more than %d distributions but %d were requested\\n\",\n                           SAMPLING_PARAMS_T::MAX_DISTRIBUTIONS, num_distributions);\n      throw std::out_of_range(\"Can't set num distributions higher than allowed in params\");\n    }\n    PARENT_CLASS::setNumDistributions(num_distributions, synchronize);\n  }\n\n  __host__ void updateDistributionParamsFromDevice(const float* trajectory_weights_d, float normalizer,\n                                                   const int& distribution_i, bool synchronize = false) override;\n\nprotected:\n  float* std_dev_d_ = 
nullptr;\n  float* control_means_d_ = nullptr;\n  std::vector<float> means_;\n};\n\ntemplate <class DYN_PARAMS_T>\nclass GaussianDistribution\n  : public GaussianDistributionImpl<GaussianDistribution<DYN_PARAMS_T>, GaussianParams, DYN_PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = GaussianDistributionImpl<GaussianDistribution, GaussianParams, DYN_PARAMS_T>;\n  using SAMPLING_PARAMS_T = typename PARENT_CLASS::SAMPLING_PARAMS_T;\n\n  GaussianDistribution(cudaStream_t stream = 0) : PARENT_CLASS(stream)\n  {\n  }\n  GaussianDistribution(const SAMPLING_PARAMS_T& params, cudaStream_t stream = 0) : PARENT_CLASS(params, stream)\n  {\n  }\n};\n\ntemplate <int C_DIM, int MAX_DISTRIBUTIONS_T>\nconst int GaussianParamsImpl<C_DIM, MAX_DISTRIBUTIONS_T>::MAX_DISTRIBUTIONS;\n}  // namespace sampling_distributions\n}  // namespace mppi\n\n#if __CUDACC__\n#include \"gaussian.cu\"\n#endif\n"
  },
  {
    "path": "include/mppi/sampling_distributions/nln/nln.cu",
    "content": "\n/**\n * Created by Bogdan Vlahov on Jan 7, 2024\n **/\n\n#define NLN_TEMPLATE template <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n#define NLN_NOISE NLNDistributionImpl<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>\n\n#include <mppi/sampling_distributions/nln/nln.cuh>\n#include <mppi/utils/cuda_math_utils.cuh>\n#include <mppi/utils/math_utils.h>\n\n__global__ void createNLNNoise(float* __restrict__ normal_noise, const float* __restrict__ log_normal_noise,\n                               const int num_trajectories, const int num_timesteps, const int control_dim)\n{\n  const int sample_index = blockIdx.x * blockDim.x + threadIdx.x;\n  const int time_index = blockIdx.y * blockDim.y + threadIdx.y;\n  const int control_index = blockIdx.z * blockDim.z + threadIdx.z;\n  if (sample_index < num_trajectories && time_index < num_timesteps && control_index < control_dim)\n  {\n    const int normal_index = (sample_index * num_timesteps + time_index) * control_dim + control_index;\n    const int log_normal_index = (control_index * num_trajectories + sample_index) * num_timesteps + time_index;\n    normal_noise[normal_index] = normal_noise[normal_index] * log_normal_noise[log_normal_index];\n  }\n}\n\nnamespace mppi\n{\nnamespace sampling_distributions\n{\nNLN_TEMPLATE\nNLN_NOISE::NLNDistributionImpl(cudaStream_t stream) : PARENT_CLASS::GaussianDistributionImpl(stream)\n{\n  calculateLogMeanAndVariance();\n}\n\nNLN_TEMPLATE\nNLN_NOISE::NLNDistributionImpl(const SAMPLING_PARAMS_T& params, cudaStream_t stream)\n  : PARENT_CLASS::GaussianDistributionImpl(params, stream)\n{\n  calculateLogMeanAndVariance();\n}\n\nNLN_TEMPLATE\n__host__ void NLN_NOISE::freeCudaMem()\n{\n  if (this->GPUMemStatus_)\n  {\n    cudaFree(log_normal_noise_d_);\n  }\n  PARENT_CLASS::freeCudaMem();\n}\n\nNLN_TEMPLATE\n__host__ void NLN_NOISE::allocateCUDAMemoryHelper()\n{\n  PARENT_CLASS::allocateCUDAMemoryHelper();\n  if (this->GPUMemStatus_)\n  {\n    if 
(log_normal_noise_d_)\n    {\n      HANDLE_ERROR(cudaFreeAsync(log_normal_noise_d_, this->stream_));\n    }\n    HANDLE_ERROR(cudaMallocAsync((void**)&log_normal_noise_d_,\n                                 sizeof(float) * this->CONTROL_DIM * this->getNumRollouts() * this->getNumTimesteps() *\n                                     this->getNumDistributions(),\n                                 this->stream_));\n  }\n}\n\nNLN_TEMPLATE\nvoid NLN_NOISE::setParams(const SAMPLING_PARAMS_T& params, bool synchronize)\n{\n  bool adjusted_variance = false;\n  for (int i = 0; i < this->CONTROL_DIM * this->getNumDistributions(); i++)\n  {\n    if (this->params_.std_dev[i] != params.std_dev[i])\n    {\n      adjusted_variance = true;\n      break;\n    }\n  }\n  PARENT_CLASS::setParams(params, synchronize);\n  if (adjusted_variance)\n  {\n    calculateLogMeanAndVariance();\n  }\n}\n\nNLN_TEMPLATE\nvoid NLN_NOISE::calculateLogMeanAndVariance()\n{\n  float normal_variance, log_variance;\n  log_noise_mean_.resize(this->CONTROL_DIM * this->getNumDistributions());\n  log_noise_std_dev_.resize(this->CONTROL_DIM * this->getNumDistributions());\n  for (int i = 0; i < this->CONTROL_DIM * this->getNumDistributions(); i++)\n  {\n    normal_variance = this->params_.std_dev[i] * this->params_.std_dev[i];\n    log_noise_mean_[i] = expf(0.5 * normal_variance);\n    float exp_normal_variance = expf(normal_variance);\n    log_variance = exp_normal_variance * (exp_normal_variance - 1.0f);\n    log_noise_std_dev_[i] = sqrtf(log_variance);\n  }\n}\n\nNLN_TEMPLATE\n__host__ void NLN_NOISE::generateSamples(const int& optimization_stride, const int& iteration_num,\n                                         curandGenerator_t& gen, bool synchronize)\n{\n  // generate log normal noise\n  for (int i = 0; i < this->CONTROL_DIM; i++)\n  {\n    HANDLE_CURAND_ERROR(\n        curandGenerateLogNormal(gen, log_normal_noise_d_ + i * this->getNumRollouts() * this->getNumTimesteps(),\n                                
this->getNumRollouts() * this->getNumTimesteps(), 0.0f, this->params_.std_dev[i]));\n  }\n  // generate normal noise\n  HANDLE_CURAND_ERROR(curandGenerateNormal(gen, this->control_samples_d_,\n                                           this->getNumTimesteps() * this->getNumRollouts() * CONTROL_DIM, 0.0f, 1.0f));\n\n  // create NLN noise by multiplying normal and log normal noise\n  const int BLOCKSIZE_X = this->params_.rewrite_controls_block_dim.x;\n  const int BLOCKSIZE_Y = this->params_.rewrite_controls_block_dim.y;\n  const int BLOCKSIZE_Z = this->params_.rewrite_controls_block_dim.z;\n  dim3 combine_noise_grid;\n  combine_noise_grid.x = mppi::math::int_ceil(this->getNumRollouts(), BLOCKSIZE_X);\n  combine_noise_grid.y = mppi::math::int_ceil(this->getNumTimesteps(), BLOCKSIZE_Y);\n  combine_noise_grid.z = mppi::math::int_ceil(this->CONTROL_DIM, BLOCKSIZE_Z);\n  createNLNNoise<<<combine_noise_grid, this->params_.rewrite_controls_block_dim, 0, this->stream_>>>(\n      this->control_samples_d_, log_normal_noise_d_, this->getNumRollouts(), this->getNumTimesteps(),\n      this->CONTROL_DIM);\n  HANDLE_ERROR(cudaGetLastError());\n  for (int i = 1; i < this->getNumDistributions(); i++)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(\n        &this->control_samples_d_[this->getNumRollouts() * this->getNumTimesteps() * CONTROL_DIM * i],\n        this->control_samples_d_, sizeof(float) * this->getNumRollouts() * this->getNumTimesteps() * CONTROL_DIM,\n        cudaMemcpyDeviceToDevice, this->stream_));\n  }\n  /**\n   * Generate noise samples with mean added\n   **/\n  dim3 control_writing_grid;\n  control_writing_grid.x = mppi::math::int_ceil(this->getNumRollouts(), BLOCKSIZE_X);\n  control_writing_grid.y = mppi::math::int_ceil(this->getNumTimesteps(), BLOCKSIZE_Y);\n  control_writing_grid.z = mppi::math::int_ceil(this->getNumDistributions(), BLOCKSIZE_Z);\n  unsigned int std_dev_mem_size = this->getNumDistributions() * CONTROL_DIM;\n  // Allocate shared memory for std_deviations 
per timestep or constant across the trajectory\n  std_dev_mem_size = mppi::math::nearest_multiple_4(\n      this->params_.time_specific_std_dev ? std_dev_mem_size * this->getNumTimesteps() : std_dev_mem_size);\n  unsigned int shared_mem_size =\n      std_dev_mem_size +\n      mppi::math::nearest_multiple_4(this->getNumDistributions() * this->getNumTimesteps() * CONTROL_DIM) +\n      mppi::math::nearest_multiple_4(BLOCKSIZE_X * BLOCKSIZE_Y * BLOCKSIZE_Z * CONTROL_DIM);\n  shared_mem_size *= sizeof(float);\n  setGaussianControls<<<control_writing_grid, this->params_.rewrite_controls_block_dim, shared_mem_size,\n                        this->stream_>>>(\n      this->control_means_d_, this->std_dev_d_, this->control_samples_d_, CONTROL_DIM, this->getNumTimesteps(),\n      this->getNumRollouts(), this->getNumDistributions(), optimization_stride,\n      powf(this->params_.std_dev_decay, iteration_num), this->params_.pure_noise_trajectories_percentage,\n      this->params_.time_specific_std_dev);\n\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n  }\n}\n}  // namespace sampling_distributions\n}  // namespace mppi\n#undef NLN_NOISE\n#undef NLN_TEMPLATE\n"
  },
  {
    "path": "include/mppi/sampling_distributions/nln/nln.cuh",
    "content": "#pragma once\n/**\n * Created by Bogdan, Jan 7, 2024\n * based off of https://github.com/IhabMohamed/log-MPPI_ros\n */\n\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n\n#include <vector>\n\n__global__ void createNLNNoise(float* __restrict__ normal_noise, const float* __restrict__ log_normal_noise,\n                               const int num_trajectories, const int num_timesteps, const int control_dim);\n\nnamespace mppi\n{\nnamespace sampling_distributions\n{\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE = GaussianParams, class DYN_PARAMS_T = DynamicsParams>\nclass NLNDistributionImpl : public GaussianDistributionImpl<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = GaussianDistributionImpl<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>;\n  using SAMPLING_PARAMS_T = typename PARENT_CLASS::SAMPLING_PARAMS_T;\n  using control_array = typename PARENT_CLASS::control_array;\n\n  static const int CONTROL_DIM = PARENT_CLASS::CONTROL_DIM;\n\n  NLNDistributionImpl(cudaStream_t stream = 0);\n  NLNDistributionImpl(const SAMPLING_PARAMS_T& params, cudaStream_t stream = 0);\n\n  ~NLNDistributionImpl()\n  {\n    freeCudaMem();\n  }\n\n  __host__ virtual std::string getSamplingDistributionName() const override\n  {\n    return \"NLN\";\n  }\n\n  void setParams(const SAMPLING_PARAMS_T& params, bool synchronize = true);\n\n  void calculateLogMeanAndVariance();\n\n  __host__ void allocateCUDAMemoryHelper();\n\n  __host__ void freeCudaMem();\n\n  __host__ void generateSamples(const int& optimization_stride, const int& iteration_num, curandGenerator_t& gen,\n                                bool synchronize = true);\n\nprotected:\n  float* log_normal_noise_d_ = nullptr;\n  std::vector<float> log_noise_mean_;\n  std::vector<float> log_noise_std_dev_;\n};\n\ntemplate <class DYN_PARAMS_T>\nclass NLNDistribution : public NLNDistributionImpl<NLNDistribution<DYN_PARAMS_T>, GaussianParams, DYN_PARAMS_T>\n{\npublic:\n  
using PARENT_CLASS = NLNDistributionImpl<NLNDistribution, GaussianParams, DYN_PARAMS_T>;\n  using SAMPLING_PARAMS_T = typename PARENT_CLASS::SAMPLING_PARAMS_T;\n\n  NLNDistribution(cudaStream_t stream = 0) : PARENT_CLASS(stream)\n  {\n  }\n  NLNDistribution(const SAMPLING_PARAMS_T& params, cudaStream_t stream = 0) : PARENT_CLASS(params, stream)\n  {\n  }\n};\n}  // namespace sampling_distributions\n}  // namespace mppi\n\n#ifdef __CUDACC__\n#include \"nln.cu\"\n#endif\n"
  },
  {
    "path": "include/mppi/sampling_distributions/piecewise_linear/piecewise_linear_noise.cuh",
    "content": "#pragma once\n/**\n * Created by David, Apr 11, 2021\n */\n\n#include <curand.h>\n#include <Eigen/Dense>\n#include <mppi/utils/gpu_err_chk.cuh>\n\n#include <algorithm>\n#include <iostream>\n#include <vector>\n\n__global__ void createPiecewiseLinearNoise(const int num_timesteps, const int num_trajectories, const int control_dim,\n                                           const int num_piecewise_segments, const int optimization_stride,\n                                           const float* scale_piecewise_noise, const float* frac_add_nominal_traj,\n                                           const float* scale_add_nominal_noise, const unsigned int* switch_num,\n                                           const float* switch_times, const float* switch_values,\n                                           float* nominal_control, const float* control_std_dev, float* output)\n{\n  int sample_index = blockIdx.x * blockDim.x + threadIdx.x;\n  int time_index = blockIdx.y * blockDim.y + threadIdx.y;\n  int control_index = blockIdx.z * blockDim.z + threadIdx.z;\n  if (sample_index >= num_trajectories || time_index >= num_timesteps || control_index >= control_dim)\n  {\n    return;\n  }\n\n  float output_val = 0.0;\n\n  if (sample_index == 1 || time_index < optimization_stride)\n  {\n    // if sample index = 1, use nominal control\n    // sample_index = 0 is replaced by 0 controls later in the kernel\n    output_val = nominal_control[time_index * control_dim + control_index];\n  }\n  else if (float(sample_index) < frac_add_nominal_traj[0] * float(num_trajectories))\n  {\n    // randomly vary output_val for frac_add_nominal_traj fraction of the trajectories\n    output_val =\n        output[(sample_index * num_timesteps + time_index) * control_dim + control_index] * scale_add_nominal_noise[0];\n    output_val += nominal_control[time_index * control_dim + control_index];\n  }\n  else\n  {\n    // all others, use piecewise linear noise.\n\n    // determine 
start/stop times and piecewise fraction\n    int switch_num_val = switch_num[sample_index * control_dim + control_index];\n    switch_num_val = min(switch_num_val, num_piecewise_segments);\n    int segment_index = 0;\n    float start_time = 0.0f;\n    float end_time = 1.0f;\n    float time_frac = float(time_index) / float(num_timesteps);\n\n    for (int i = 0; i < switch_num_val; i++)\n    {\n      float switch_time = switch_times[(sample_index * (num_piecewise_segments + 1) + i) * control_dim + control_index];\n      if (switch_time < time_frac)\n      {\n        segment_index += 1;\n        if (start_time < switch_time)\n        {\n          start_time = switch_time;\n        }\n      }\n      if (switch_time > time_frac)\n      {\n        if (end_time > switch_time)\n        {\n          end_time = switch_time;\n        }\n      }\n    }\n\n    // compute noise, interpolated between first and second value\n    float first_val = 0;\n    if (start_time < float(optimization_stride) / float(num_timesteps))\n    {  // first value should always be nominal control at optimization stride start\n      first_val = nominal_control[optimization_stride * control_dim + control_index];\n      start_time = float(optimization_stride) / float(num_timesteps);\n    }\n    else\n    {\n      first_val =\n          switch_values[(sample_index * (num_piecewise_segments + 1) + segment_index) * control_dim + control_index];\n    }\n    float second_val =\n        switch_values[(sample_index * (num_piecewise_segments + 1) + segment_index + 1) * control_dim + control_index];\n    float frac_interval = (time_frac - start_time) / (end_time - start_time);\n    output_val = (1.0f - frac_interval) * first_val + frac_interval * second_val;\n    output_val = output_val * 2.0 - 1.0;                             // scale to [-1, 1]\n    output_val = output_val * scale_piecewise_noise[control_index];  // scale by scale_piecewise_noise\n\n    if (float(sample_index) < (frac_add_nominal_traj[0] + 
frac_add_nominal_traj[1]) * float(num_trajectories))\n    {\n      // randomly vary output_val for frac_add_nominal_traj[1] fraction of the trajectories\n      output_val = output_val * scale_add_nominal_noise[1];\n      output_val += nominal_control[time_index * control_dim + control_index];\n    }\n  }\n\n  // control noise output gets scaled by the kernel, and added to nominal control.\n  // so we need to undo these operations to get exactly the control trajectory we want.\n  output[(sample_index * num_timesteps + time_index) * control_dim + control_index] =\n      output_val / control_std_dev[control_index] - nominal_control[time_index * control_dim + control_index];\n}\n\nvoid piecewise_linear_noise(const int num_timesteps, const int num_trajectories, const int control_dim,\n                            const int num_piecewise_segments, const int optimization_stride,\n                            std::vector<float>& scale_piecewise_noise, std::vector<float>& frac_add_nominal_traj,\n                            std::vector<float>& scale_add_nominal_noise, float* control_d, float* control_noise_d,\n                            const float* control_std_dev_d, curandGenerator_t& gen, cudaStream_t stream = 0)\n{\n  // generate piecewise linear random noise, in 2 steps:\n  // 1. randomly decide start/stop times of segments (uniformly divide t=0:T)\n  // 2. 
randomly decide start and stop values of segments (uniform within bounds)\n\n  const int BLOCKSIZE_X = 32;\n  const int BLOCKSIZE_Y = 32;\n  const int BLOCKSIZE_Z = 1;\n\n  // try to sample num_piecewise_segments with reasonable poisson distribution\n  const float switch_num_poisson_lambda = 0.5f * float(num_piecewise_segments);\n\n  // create random start/stop times and values\n  // curandSetStream(gen, stream);\n  unsigned int* switch_num_d;\n  float* switch_times_d;\n  float* switch_values_d;\n  HANDLE_ERROR(cudaMalloc((void**)&switch_num_d, sizeof(unsigned int) * num_trajectories * control_dim));\n  HANDLE_ERROR(cudaMalloc((void**)&switch_times_d,\n                          sizeof(float) * num_trajectories * (num_piecewise_segments + 1) * control_dim));\n  HANDLE_ERROR(cudaMalloc((void**)&switch_values_d,\n                          sizeof(float) * num_trajectories * (num_piecewise_segments + 1) * control_dim));\n  HANDLE_CURAND_ERROR(curandGeneratePoisson(gen, (unsigned int*)switch_num_d, num_trajectories * control_dim,\n                                            switch_num_poisson_lambda));\n  HANDLE_CURAND_ERROR(curandGenerateUniform(gen, (float*)switch_times_d,\n                                            num_trajectories * (num_piecewise_segments + 1) * control_dim));\n  HANDLE_CURAND_ERROR(curandGenerateUniform(gen, (float*)switch_values_d,\n                                            num_trajectories * (num_piecewise_segments + 1) * control_dim));\n\n  float* scale_piecewise_noise_d;\n  float* frac_add_nominal_traj_d;\n  float* scale_add_nominal_noise_d;\n  HANDLE_ERROR(cudaMalloc((void**)&scale_piecewise_noise_d, sizeof(float) * control_dim));\n  HANDLE_ERROR(cudaMalloc((void**)&frac_add_nominal_traj_d, sizeof(float) * control_dim));\n  HANDLE_ERROR(cudaMalloc((void**)&scale_add_nominal_noise_d, sizeof(float) * control_dim));\n  HANDLE_ERROR(cudaMemcpy(scale_piecewise_noise_d, scale_piecewise_noise.data(), sizeof(float) * control_dim,\n                  
        cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(frac_add_nominal_traj_d, frac_add_nominal_traj.data(), sizeof(float) * control_dim,\n                          cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(scale_add_nominal_noise_d, scale_add_nominal_noise.data(), sizeof(float) * control_dim,\n                          cudaMemcpyHostToDevice));\n\n  // create piecewise linear noise\n  const int grid_x = (num_trajectories - 1) / BLOCKSIZE_X + 1;\n  const int grid_y = (num_timesteps - 1) / BLOCKSIZE_Y + 1;\n  const int grid_z = (control_dim - 1) / BLOCKSIZE_Z + 1;\n  dim3 grid(grid_x, grid_y, grid_z);\n  dim3 block(BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z);\n  createPiecewiseLinearNoise<<<grid, block, 0, stream>>>(\n      num_timesteps, num_trajectories, control_dim, num_piecewise_segments, optimization_stride,\n      scale_piecewise_noise_d, frac_add_nominal_traj_d, scale_add_nominal_noise_d, switch_num_d, switch_times_d,\n      switch_values_d, control_d, control_std_dev_d, control_noise_d);\n\n  // cleanup\n  HANDLE_ERROR(cudaGetLastError());\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n  HANDLE_ERROR(cudaFree(switch_num_d));\n  HANDLE_ERROR(cudaFree(switch_times_d));\n  HANDLE_ERROR(cudaFree(switch_values_d));\n  HANDLE_ERROR(cudaFree(scale_piecewise_noise_d));\n  HANDLE_ERROR(cudaFree(frac_add_nominal_traj_d));\n  HANDLE_ERROR(cudaFree(scale_add_nominal_noise_d));\n}\n"
  },
  {
    "path": "include/mppi/sampling_distributions/sampling_distribution.cu",
    "content": "/*\nCreated by Bogdan Vlahov on 3/22/2023\n*/\n#include <mppi/sampling_distributions/sampling_distribution.cuh>\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\nvoid SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::GPUSetup()\n{\n  CLASS_T* derived = static_cast<CLASS_T*>(this);\n  if (!GPUMemStatus_)\n  {\n    sampling_d_ = Managed::GPUSetup<CLASS_T>(derived);\n    allocateCUDAMemory();\n    resizeVisualizationControlTrajectories(true);\n  }\n  else\n  {\n    this->logger_->debug(\"%s: GPU Memory already set.\\n\", derived->getSamplingDistributionName().c_str());\n  }\n  derived->paramsToDevice();\n}\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n__host__ void SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::freeCudaMem()\n{\n  if (GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaFree(sampling_d_));\n    HANDLE_ERROR(cudaFree(control_samples_d_));\n    HANDLE_ERROR(cudaFree(vis_control_samples_d_));\n    GPUMemStatus_ = false;\n    sampling_d_ = nullptr;\n    control_samples_d_ = nullptr;\n    vis_control_samples_d_ = nullptr;\n  }\n}\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n__device__ void SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::initializeDistributions(\n    const float* __restrict__ output, const float t_0, const float dt, float* __restrict__ theta_d)\n{\n  SAMPLING_PARAMS_T* shared = reinterpret_cast<SAMPLING_PARAMS_T*>(theta_d);\n  *shared = this->params_;\n  // #ifdef __CUDA_ARCH__\n  //   if (threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0)\n  //   {\n  //     printf(\"Num timesteps: %d %d\\n\", this->params_.num_timesteps, shared->num_timesteps);\n  //   }\n  // #endif\n}\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n__host__ void SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::paramsToDevice(bool 
synchronize)\n{\n  if (GPUMemStatus_)\n  {\n    HANDLE_ERROR(\n        cudaMemcpyAsync(&sampling_d_->params_, &params_, sizeof(SAMPLING_PARAMS_T), cudaMemcpyHostToDevice, stream_));\n    if (synchronize)\n    {\n      HANDLE_ERROR(cudaStreamSynchronize(stream_));\n    }\n  }\n}\n\n// By default, call the update from device method by first putting the weights into gpu memory\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n__host__ void SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::updateDistributionParamsFromHost(\n    const Eigen::Ref<const Eigen::MatrixXf>& trajectory_weights, float normalizer, const int& distribution_i,\n    bool synchronize)\n{\n  float* trajectory_weights_d;\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n  HANDLE_ERROR(cudaMallocAsync((void**)&trajectory_weights_d, sizeof(float) * this->getNumRollouts(), this->stream_));\n#else\n  HANDLE_ERROR(cudaMalloc((void**)&trajectory_weights_d, sizeof(float) * this->getNumRollouts()));\n#endif\n  HANDLE_ERROR(cudaMemcpyAsync(trajectory_weights_d, trajectory_weights.data(), sizeof(float) * this->getNumRollouts(),\n                               cudaMemcpyHostToDevice, this->stream_));\n  updateDistributionParamsFromDevice(trajectory_weights_d, normalizer, distribution_i, false);\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n  HANDLE_ERROR(cudaFreeAsync(trajectory_weights_d, this->stream_));\n#else\n  HANDLE_ERROR(cudaFree(trajectory_weights_d));\n#endif\n\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n  }\n}\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n__host__ void SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::allocateCUDAMemory(bool synchronize)\n{\n  if (GPUMemStatus_)\n  {\n    if (control_samples_d_)\n    {  // deallocate previous memory for control samples\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n      
HANDLE_ERROR(cudaFreeAsync(control_samples_d_, stream_));\n#else\n      HANDLE_ERROR(cudaFree(control_samples_d_));\n#endif\n      // control_samples_d_ = nullptr;\n    }\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n    HANDLE_ERROR(cudaMallocAsync((void**)&control_samples_d_,\n                                 sizeof(float) * this->getNumDistributions() * this->getNumRollouts() *\n                                     this->getNumTimesteps() * CONTROL_DIM,\n                                 stream_));\n#else\n    HANDLE_ERROR(cudaMalloc((void**)&control_samples_d_, sizeof(float) * this->getNumDistributions() *\n                                                             this->getNumRollouts() * this->getNumTimesteps() *\n                                                             CONTROL_DIM));\n#endif\n    HANDLE_ERROR(cudaMemcpyAsync(&sampling_d_->control_samples_d_, &control_samples_d_, sizeof(float*),\n                                 cudaMemcpyHostToDevice, stream_));\n  }\n  CLASS_T* derived = static_cast<CLASS_T*>(this);\n  derived->allocateCUDAMemoryHelper();\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream_));\n  }\n}\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n__host__ void\nSamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::resizeVisualizationControlTrajectories(bool synchronize)\n{\n  if (GPUMemStatus_)\n  {\n    if (vis_control_samples_d_)\n    {  // deallocate previous memory for control samples\n#if defined(CUDART_VERSION) && CUDART_VERSION > 11200\n      HANDLE_ERROR(cudaFreeAsync(vis_control_samples_d_, vis_stream_));\n#else\n      HANDLE_ERROR(cudaFree(vis_control_samples_d_));\n#endif\n      // vis_control_samples_d_ = nullptr;\n    }\n    if (this->getNumVisRollouts() == 0)\n    {  // No need to allocate mmemory if it will end up being zero\n      vis_control_samples_d_ = nullptr;\n    }\n    else\n    {\n#if defined(CUDART_VERSION) && CUDART_VERSION > 
11200\n      HANDLE_ERROR(cudaMallocAsync((void**)&vis_control_samples_d_,\n                                   sizeof(float) * this->getNumDistributions() * this->getNumVisRollouts() *\n                                       this->getNumTimesteps() * CONTROL_DIM,\n                                   vis_stream_));\n#else\n      HANDLE_ERROR(cudaMalloc((void**)&vis_control_samples_d_, sizeof(float) * this->getNumDistributions() *\n                                                                   this->getNumVisRollouts() * this->getNumTimesteps() *\n                                                                   CONTROL_DIM));\n#endif\n    }\n    HANDLE_ERROR(cudaMemcpyAsync(&sampling_d_->vis_control_samples_d_, &vis_control_samples_d_, sizeof(float*),\n                                 cudaMemcpyHostToDevice, vis_stream_));\n  }\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(vis_stream_));\n  }\n}\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n__device__ void SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::readControlSample(\n    const int& sample_index, const int& t, const int& distribution_index, float* __restrict__ control,\n    float* __restrict__ theta_d, const int& block_size, const int& thread_index, const float* __restrict__ output)\n{\n  SAMPLING_PARAMS_T* params_p = (SAMPLING_PARAMS_T*)theta_d;\n  const int distribution_i = distribution_index >= params_p->num_distributions ? 
0 : distribution_index;\n  const int control_index =\n      ((params_p->num_rollouts * distribution_i + sample_index) * params_p->num_timesteps + t) * CONTROL_DIM;\n  if (CONTROL_DIM % 4 == 0)\n  {\n    float4* u4 = reinterpret_cast<float4*>(control);\n    const float4* u4_d = reinterpret_cast<const float4*>(&(this->control_samples_d_[control_index]));\n    for (int i = thread_index; i < CONTROL_DIM / 4; i += block_size)\n    {\n      u4[i] = u4_d[i];\n    }\n  }\n  else if (CONTROL_DIM % 2 == 0)\n  {\n    float2* u2 = reinterpret_cast<float2*>(control);\n    const float2* u2_d = reinterpret_cast<const float2*>(&(this->control_samples_d_[control_index]));\n    for (int i = thread_index; i < CONTROL_DIM / 2; i += block_size)\n    {\n      u2[i] = u2_d[i];\n    }\n  }\n  else\n  {\n    float* u = reinterpret_cast<float*>(control);\n    const float* u_d = reinterpret_cast<const float*>(&(this->control_samples_d_[control_index]));\n    for (int i = thread_index; i < CONTROL_DIM; i += block_size)\n    {\n      u[i] = u_d[i];\n    }\n  }\n}\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n__device__ void SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::readVisControlSample(\n    const int& sample_index, const int& t, const int& distribution_index, float* __restrict__ control,\n    float* __restrict__ theta_d, const int& block_size, const int& thread_index, const float* __restrict__ output)\n{\n  SAMPLING_PARAMS_T* params_p = (SAMPLING_PARAMS_T*)theta_d;\n  const int distribution_i = distribution_index >= params_p->num_distributions ? 
0 : distribution_index;\n  const int control_index =\n      ((params_p->num_visualization_rollouts * distribution_i + sample_index) * params_p->num_timesteps + t) *\n      CONTROL_DIM;\n  if (CONTROL_DIM % 4 == 0)\n  {\n    float4* u4 = reinterpret_cast<float4*>(control);\n    const float4* u4_d = reinterpret_cast<const float4*>(&(this->vis_control_samples_d_[control_index]));\n    for (int i = thread_index; i < CONTROL_DIM / 4; i += block_size)\n    {\n      u4[i] = u4_d[i];\n    }\n  }\n  else if (CONTROL_DIM % 2 == 0)\n  {\n    float2* u2 = reinterpret_cast<float2*>(control);\n    const float2* u2_d = reinterpret_cast<const float2*>(&(this->vis_control_samples_d_[control_index]));\n    for (int i = thread_index; i < CONTROL_DIM / 2; i += block_size)\n    {\n      u2[i] = u2_d[i];\n    }\n  }\n  else\n  {\n    float* u = reinterpret_cast<float*>(control);\n    const float* u_d = reinterpret_cast<const float*>(&(this->vis_control_samples_d_[control_index]));\n    for (int i = thread_index; i < CONTROL_DIM; i += block_size)\n    {\n      u[i] = u_d[i];\n    }\n  }\n}\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n__host__ __device__ float* SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::getControlSample(\n    const int& sample_index, const int& t, const int& distribution_index, const float* __restrict__ theta_d,\n    const float* __restrict__ output)\n{\n#ifdef __CUDA_ARCH__\n  SAMPLING_PARAMS_T* params_p = (SAMPLING_PARAMS_T*)theta_d;\n#else\n  SAMPLING_PARAMS_T* params_p = &this->params_;\n#endif\n  const int distribution_i = distribution_index >= params_p->num_distributions ? 
0 : distribution_index;\n  return &this->control_samples_d_[((params_p->num_rollouts * distribution_i + sample_index) * params_p->num_timesteps +\n                                    t) *\n                                   CONTROL_DIM];\n}\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n__host__ __device__ float* SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::getVisControlSample(\n    const int& sample_index, const int& t, const int& distribution_index, const float* __restrict__ theta_d,\n    const float* __restrict__ output)\n{\n#ifdef __CUDA_ARCH__\n  SAMPLING_PARAMS_T* params_p = (SAMPLING_PARAMS_T*)theta_d;\n#else\n  SAMPLING_PARAMS_T* params_p = &this->params_;\n#endif\n  const int distribution_i = distribution_index >= params_p->num_distributions ? 0 : distribution_index;\n  return &this->vis_control_samples_d_\n              [((params_p->num_visualization_rollouts * distribution_i + sample_index) * params_p->num_timesteps + t) *\n               CONTROL_DIM];\n}\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n__device__ void SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::writeControlSample(\n    const int& sample_index, const int& t, const int& distribution_index, const float* __restrict__ control,\n    float* __restrict__ theta_d, const int& block_size, const int& thread_index, const float* __restrict__ output)\n{\n  SAMPLING_PARAMS_T* params_p = (SAMPLING_PARAMS_T*)theta_d;\n  const int distribution_i = distribution_index >= params_p->num_distributions ? 
0 : distribution_index;\n  const int control_index =\n      ((params_p->num_rollouts * distribution_i + sample_index) * params_p->num_timesteps + t) * CONTROL_DIM;\n  if (CONTROL_DIM % 4 == 0)\n  {\n    const float4* u4 = reinterpret_cast<const float4*>(control);\n    float4* u4_d = reinterpret_cast<float4*>(&(this->control_samples_d_[control_index]));\n    for (int i = thread_index; i < CONTROL_DIM / 4; i += block_size)\n    {\n      u4_d[i] = u4[i];\n    }\n  }\n  else if (CONTROL_DIM % 2 == 0)\n  {\n    const float2* u2 = reinterpret_cast<const float2*>(control);\n    float2* u2_d = reinterpret_cast<float2*>(&(this->control_samples_d_[control_index]));\n    for (int i = thread_index; i < CONTROL_DIM / 2; i += block_size)\n    {\n      u2_d[i] = u2[i];\n    }\n  }\n  else\n  {\n    const float* u = reinterpret_cast<const float*>(control);\n    float* u_d = reinterpret_cast<float*>(&(this->control_samples_d_[control_index]));\n    for (int i = thread_index; i < CONTROL_DIM; i += block_size)\n    {\n      u_d[i] = u[i];\n    }\n  }\n}\n"
  },
  {
    "path": "include/mppi/sampling_distributions/sampling_distribution.cuh",
    "content": "#pragma once\n/*\nCreated by Bogdan Vlahov on 3/22/2023\n*/\n\n#include <mppi/dynamics/dynamics.cuh>\n#include <mppi/utils/managed.cuh>\n\n#include <string>\n\nnamespace mppi\n{\nnamespace sampling_distributions\n{\n// Auto-align all Parameter structures to float4 so that any data after it in saved memory has full memory alignment\ntemplate <int C_DIM>\nstruct alignas(float4) SamplingParams\n{\n  static const int CONTROL_DIM = C_DIM;\n  bool use_same_noise_for_all_distributions = true;\n  int num_rollouts = 1;\n  int num_timesteps = 1;\n  int num_distributions = 1;\n  int num_visualization_rollouts = 0;\n  SamplingParams(int num_rollouts, int num_timesteps, int num_distributions = 1)\n    : num_rollouts{ num_rollouts }, num_timesteps{ num_timesteps }, num_distributions{ num_distributions }\n  {\n  }\n  SamplingParams() = default;\n};\n\ntemplate <class CLASS_T, template <int> typename PARAMS_TEMPLATE, class DYN_PARAMS_T>\nclass SamplingDistribution : public Managed\n{\npublic:\n  /*************************************\n   * Setup typedefs and aliases\n   *************************************/\n\n  typedef CLASS_T SAMPLING_T;\n  using ControlIndex = typename DYN_PARAMS_T::ControlIndex;\n  using OutputIndex = typename DYN_PARAMS_T::OutputIndex;\n  using TEMPLATED_DYN_PARAMS = DYN_PARAMS_T;\n\n  static const int CONTROL_DIM = C_IND_CLASS(DYN_PARAMS_T, NUM_CONTROLS);\n  // static const int CONTROL_DIM = E_INDEX(ControlIndex, NUM_CONTROLS);\n  typedef PARAMS_TEMPLATE<CONTROL_DIM> SAMPLING_PARAMS_T;\n  typedef Eigen::Matrix<float, CONTROL_DIM, 1> control_array;\n\n  static_assert(std::is_base_of<SamplingParams<CONTROL_DIM>, SAMPLING_PARAMS_T>::value,\n                \"Sampling Distribution PARAMS_T does not inherit from SamplingParams\");\n\n  /*************************************\n   * Constructors and Destructors\n   *************************************/\n\n  SamplingDistribution(cudaStream_t stream = 0) : Managed(stream)\n  {\n    
this->SHARED_MEM_REQUEST_GRD_BYTES = sizeof(SAMPLING_PARAMS_T);\n    this->SHARED_MEM_REQUEST_BLK_BYTES = 0;\n  }\n  SamplingDistribution(const SAMPLING_PARAMS_T& params, cudaStream_t stream = 0) : params_{ params }, Managed(stream)\n  {\n    this->SHARED_MEM_REQUEST_GRD_BYTES = sizeof(SAMPLING_PARAMS_T);\n    this->SHARED_MEM_REQUEST_BLK_BYTES = 0;\n  }\n\n  virtual ~SamplingDistribution()\n  {\n    freeCudaMem();\n  }\n\n  /**\n   * @brief Get the Sampling Distribution Name object\n   *\n   * @return std::string - name of the sampling distribution\n   */\n  __host__ virtual std::string getSamplingDistributionName() const\n  {\n    return \"Sampling distribution name not set\";\n  }\n\n  /*************************************\n   * DEFAULT CLASS METHODS THAT SHOULD NOT NEED OVERWRITING\n   *************************************/\n\n  void GPUSetup();\n\n  /**\n   * Updates the sampling distribution parameters\n   * @param params\n   */\n  void setParams(const SAMPLING_PARAMS_T& params, bool synchronize = true)\n  {\n    bool reallocate_memory = params_.num_timesteps != params.num_timesteps ||\n                             params_.num_rollouts != params.num_rollouts ||\n                             params_.num_distributions != params.num_distributions;\n    bool reallocate_vis_memory = params_.num_timesteps != params.num_timesteps ||\n                                 params_.num_visualization_rollouts != params.num_visualization_rollouts ||\n                                 params_.num_distributions != params.num_distributions;\n\n    params_ = params;\n    if (GPUMemStatus_)\n    {\n      if (reallocate_memory)\n      {\n        allocateCUDAMemory(false);\n      }\n      if (reallocate_vis_memory)\n      {\n        resizeVisualizationControlTrajectories(true);\n      }\n      CLASS_T& derived = static_cast<CLASS_T&>(*this);\n      derived.paramsToDevice(synchronize);\n    }\n  }\n\n  // __host__ __device__ SAMPLING_PARAMS_T getParams() const\n  // {\n  //   return 
params_;\n  // }\n\n  __host__ __device__ const SAMPLING_PARAMS_T getParams() const\n  {\n    return params_;\n  }\n\n  __host__ __device__ int getNumTimesteps() const\n  {\n    return this->params_.num_timesteps;\n  }\n\n  __host__ __device__ int getNumRollouts() const\n  {\n    return this->params_.num_rollouts;\n  }\n\n  __host__ __device__ int getNumVisRollouts() const\n  {\n    return this->params_.num_visualization_rollouts;\n  }\n\n  __host__ __device__ int getNumDistributions() const\n  {\n    return this->params_.num_distributions;\n  }\n\n  __host__ void setNumTimesteps(const int num_timesteps, bool synchronize = false)\n  {\n    const bool reallocate_memory = params_.num_timesteps != num_timesteps;\n    this->params_.num_timesteps = num_timesteps;\n    if (GPUMemStatus_ && reallocate_memory)\n    {\n      if (reallocate_memory)\n      {\n        allocateCUDAMemory(false);\n        resizeVisualizationControlTrajectories(true);\n      }\n      CLASS_T& derived = static_cast<CLASS_T&>(*this);\n      derived.paramsToDevice(synchronize);\n    }\n  }\n\n  __host__ void setNumVisRollouts(const int num_visualization_rollouts, bool synchronize = false)\n  {\n    const bool reallocate_memory = params_.num_visualization_rollouts != num_visualization_rollouts;\n    this->params_.num_visualization_rollouts = num_visualization_rollouts;\n    if (GPUMemStatus_ && reallocate_memory)\n    {\n      if (reallocate_memory)\n      {\n        resizeVisualizationControlTrajectories(true);\n      }\n      CLASS_T& derived = static_cast<CLASS_T&>(*this);\n      derived.paramsToDevice(synchronize);\n    }\n  }\n\n  __host__ void setNumRollouts(const int num_rollouts, bool synchronize = false)\n  {\n    const bool reallocate_memory = params_.num_rollouts != num_rollouts;\n    this->params_.num_rollouts = num_rollouts;\n    if (GPUMemStatus_ && reallocate_memory)\n    {\n      if (reallocate_memory)\n      {\n        allocateCUDAMemory(false);\n      }\n      CLASS_T& derived = 
static_cast<CLASS_T&>(*this);\n      derived.paramsToDevice(synchronize);\n    }\n  }\n\n  __host__ void setNumDistributions(const int num_distributions, bool synchronize = false)\n  {\n    const bool reallocate_memory = params_.num_distributions != num_distributions;\n    this->params_.num_distributions = num_distributions;\n    if (GPUMemStatus_ && reallocate_memory)\n    {\n      if (reallocate_memory)\n      {\n        allocateCUDAMemory(false);\n        resizeVisualizationControlTrajectories(true);\n      }\n      CLASS_T& derived = static_cast<CLASS_T&>(*this);\n      derived.paramsToDevice(synchronize);\n    }\n  }\n\n  __host__ void resizeVisualizationControlTrajectories(bool synchronize = true);\n\n  __host__ void setVisStream(cudaStream_t stream)\n  {\n    vis_stream_ = stream;\n  }\n\n  __host__ void allocateCUDAMemory(bool synchronize = false);\n\n  /**\n   * @brief deallocates the allocated cuda memory for the sampling distribution\n   */\n  __host__ void freeCudaMem();\n\n  /**\n   * @brief Get a pointer to a specific control sample. 
This is useful for plugging into methods like enforceConstraints\n   *\n   * @param sample_index - sample number out of num_rollouts\n   * @param t - timestep out of num_timesteps\n   * @param distribution_index - distribution index (if it is larger than num_distributions, it just defaults to first\n   * distribution for future compatibility with sampling dynamical systems)\n   * @param output - output pointer for compatibility with a output-based sampling distribution\n   * @return float* pointer to the control array that is at [distribution_index][sample_index][t]\n   */\n  __host__ __device__ float* getControlSample(const int& sample_index, const int& t, const int& distribution_index,\n                                              const float* __restrict__ theta_d = nullptr,\n                                              const float* __restrict__ output = nullptr);\n\n  /**\n   * @brief Get a pointer to a specific visualization control sample.\n   *\n   * @param sample_index - sample number out of num_visualization_rollouts\n   * @param t - timestep out of num_timesteps\n   * @param distribution_index - distribution index (if it is larger than num_distributions, it just defaults to first\n   * distribution for future compatibility with sampling dynamical systems)\n   * @param output - output pointer for compatibility with a output-based sampling distribution\n   * @return float* pointer to the control array that is at [distribution_index][sample_index][t]\n   */\n  __host__ __device__ float* getVisControlSample(const int& sample_index, const int& t, const int& distribution_index,\n                                                 const float* __restrict__ theta_d = nullptr,\n                                                 const float* __restrict__ output = nullptr);\n\n  /**\n   * @brief Method for starting up any potential work for distributions. 
By default, it just loads the params into\n   * shared memory\n   *\n   * @param output - initial output\n   * @param t_0 - starting time\n   * @param dt - step size\n   * @param theta_d - shared memory pointer to sampling distribution space\n   */\n  __device__ void initializeDistributions(const float* __restrict__ output, const float t_0, const float dt,\n                                          float* __restrict__ theta_d);\n\n  __host__ void paramsToDevice(bool synchronize = true);\n\n  /**\n   * @brief Look up a specific control sample located at [distribution_index][sample_index][t] and put it into the\n   * control array\n   *\n   * @param sample_index - sample number out of num_rollouts\n   * @param t - timestep out of num_timesteps\n   * @param distribution_index - distribution index (if it is larger than num_distributions, it just defaults to first\n   * distribution for future compatibility with sampling dynamical systems)\n   * @param control - pointer to fill with the specific control array\n   * @param theta_d - shared memory pointer for passing through params\n   * @param block_size - parallelizable step size for the gpu (normally blockDim.y)\n   * @param thread_index - parallelizable index for the gpu (normally threadIdx.y)\n   * @param output - output pointer for compatibility with a output-based sampling distribution\n   */\n  __device__ void readControlSample(const int& sample_index, const int& t, const int& distribution_index,\n                                    float* __restrict__ control, float* __restrict__ theta_d, const int& block_size = 1,\n                                    const int& thread_index = 1, const float* __restrict__ output = nullptr);\n\n  /**\n   * @brief Look up a specific visualization control sample located at [distribution_index][sample_index][t] and put it\n   * into the control array\n   *\n   * @param sample_index - sample number out of num_visualization_rollouts\n   * @param t - timestep out of num_timesteps\n   * 
@param distribution_index - distribution index (if it is larger than num_distributions, it just defaults to first\n   * distribution for future compatibility with sampling dynamical systems)\n   * @param control - pointer to fill with the specific control array\n   * @param theta_d - shared memory pointer for passing through params\n   * @param block_size - parallelizable step size for the gpu (normally blockDim.y)\n   * @param thread_index - parallelizable index for the gpu (normally threadIdx.y)\n   * @param output - output pointer for compatibility with a output-based sampling distribution\n   */\n  __device__ void readVisControlSample(const int& sample_index, const int& t, const int& distribution_index,\n                                       float* __restrict__ control, float* __restrict__ theta_d,\n                                       const int& block_size = 1, const int& thread_index = 1,\n                                       const float* __restrict__ output = nullptr);\n\n  /**\n   * @brief Update the distribution according to the weights of each sample. Should only be used if weights only exist\n   * on the host side. 
Otherwise, use updateDistributionParamsFromDevice\n   *\n   * @param trajectory_weights - vector of size num_rollouts containing the weight of each sample\n   * @param normalizer - the sum of all trajectory weights\n   * @param distribution_i - which distribution to update\n   * @param synchronize - whether or not to run cudaStreamSynchronize\n   */\n  __host__ void updateDistributionParamsFromHost(const Eigen::Ref<const Eigen::MatrixXf>& trajectory_weights,\n                                                 float normalizer, const int& distribution_i, bool synchronize = false);\n\n  /*************************************\n   * Methods that need to be overwritten by derived classes\n   *************************************/\n\n  /**\n   * @brief method for allocating additional CUDA memory in derived\n   */\n  __host__ void allocateCUDAMemoryHelper();\n\n  __host__ __device__ float computeFeedbackCost(const float* __restrict__ u_fb, float* __restrict__ theta_d,\n                                                const int t, const int distribution_idx, const float lambda = 1.0,\n                                                const float alpha = 0.0);\n\n  /**\n   * @brief host method to calculate the cost for feedback based on the sampling distribution in RMPPI\n   *\n   * @param u_fb - feedback control\n   * @param t - timestep\n   * @param distribution_idx - distribution index\n   * @param lambda - MPPI temperature parameter\n   * @param alpha - coeff to turn off the likelihood cost (set to 1 -> no likelihood cost, set to 0 -> all likelihood\n   *\n   * @return feedback cost\n   */\n  __host__ float computeFeedbackCost(const Eigen::Ref<const control_array>& u_fb, const int t,\n                                     const int distribution_idx, const float lambda = 1.0, const float alpha = 0.0);\n\n  /**\n   * @brief Device method to calculate the likelihood ratio cost for a given sample u\n   *\n   * @param u - sampled control\n   * @param theta_d - shared memory for 
sampling distribution\n   * @param t - timestep\n   * @param distribution_idx - distribution index (if it is larger than num_distributions, it just defaults to first\n   * distribution for future compatibility with sampling dynamical systems)\n   * @param lambda - MPPI temperature parameter\n   * @param alpha - coeff to turn off the likelihood cost (set to 1 -> no likelihood cost, set to 0 -> all likelihood\n   * cost)\n   */\n  __host__ __device__ float computeLikelihoodRatioCost(const float* __restrict__ u, float* __restrict__ theta_d,\n                                                       const int sample_index, const int t, const int distribution_idx,\n                                                       const float lambda = 1.0, const float alpha = 0.0);\n\n  /**\n   * @brief Host-side method to calculate the likelihood ration cost for a given sample u\n   *\n   * @param u - sampled control\n   * @param t - timestep\n   * @param distribution_idx - distribution index (if it is larger than num_distributions, it just defaults to first\n   * distribution for future compatibility with sampling dynamical systems)\n   * @param lambda - MPPI temperature parameter\n   * @param alpha - coeff to turn off the likelihood cost (set to 1 -> no likelihood cost, set to 0 -> all likelihood\n   * cost)\n   */\n  __host__ float computeLikelihoodRatioCost(const Eigen::Ref<const control_array>& u, const int sample_index,\n                                            const int t, const int distribution_idx, const float lambda = 1.0,\n                                            const float alpha = 0.0);\n\n  /**\n   * @brief Get the latest importance sampler from time-shifting on the controller and update the device importance\n   * sampler\n   *\n   * @param importance_sampler - host pointer to a control sequence that is NUM_TIMESTEPS * CONTROL_DIM\n   * @param distribution_idx - which distribution is the importance sampler meant for\n   * @param synchronize - whether or not to 
run cudaStreamSynchronize\n   */\n  __host__ void copyImportanceSamplerToDevice(const float* importance_sampler, const int& distribution_idx,\n                                              bool synchronize = true);\n\n  /**\n   * @brief Generate control samples that will be on the GPU.\n   *\n   * @param optimization_stride - timestep to start control samples from\n   * @param iteration_num - which iteration of the algorithm we are on. Useful for decaying std_dev\n   * @param gen - pseudo-random noise generator\n   */\n  __host__ void generateSamples(const int& optimization_stride, const int& iteration_num, curandGenerator_t& gen,\n                                bool synchronize = true);\n\n  /**\n   * @brief Set the Host-side Optimal Control Trajectory\n   *\n   * @param optimal_control_trajectory - pointer to CPU memory location to store the optimal control\n   * @param distribution_idx - which distribution we are looking for the optimal control from (Useful for Tube and\n   * RMPPI)\n   * @param synchronize - whether or not to run cudaStreamSynchronize\n   */\n  __host__ void setHostOptimalControlSequence(float* optimal_control_trajectory, const int& distribution_idx,\n                                              bool synchronize = true);\n\n  /**\n   * @brief takes in the cost of each sample generated and conducts an update of the distribution (For Gaussians, mean\n   * update)\n   *\n   * @param trajectory_weights_d - vector of weights of size num_rollouts located on the GPU\n   * @param normalizer - sum of all weights\n   * @param distribution_i - which distribution to update\n   * @param synchronize - whether or not to run cudaStreamSynchronize\n   */\n  __host__ virtual void updateDistributionParamsFromDevice(const float* trajectory_weights_d, float normalizer,\n                                                           const int& distribution_i, bool synchronize = false) = 0;\n\n  /**\n   * @brief Write to a specific control sample located at 
[distribution_index][sample_index][t] from the\n   * control array\n   *\n   * @param sample_index - sample number out of num_rollouts\n   * @param t - timestep out of num_timesteps\n   * @param distribution_index - distribution index (if it is larger than num_distributions, it just defaults to first\n   * distribution for future compatibility with sampling dynamical systems)\n   * @param control - pointer to control array with the desired data\n   * @param theta_d - shared memory pointer for passing through params\n   * @param block_size - parallelizable step size for the gpu (normally blockDim.y)\n   * @param thread_index - parallelizable index for the gpu (normally threadIdx.y)\n   * @param output - output pointer for compatibility with a output-based sampling distribution\n   */\n  __device__ void writeControlSample(const int& sample_index, const int& t, const int& distribution_index,\n                                     const float* __restrict__ control, float* __restrict__ theta_d,\n                                     const int& block_size = 1, const int& thread_index = 1,\n                                     const float* __restrict__ output = nullptr);\n\n  CLASS_T* sampling_d_ = nullptr;\n  cudaStream_t vis_stream_ = nullptr;\n\nprotected:\n  float* control_samples_d_ = nullptr;\n  float* vis_control_samples_d_ = nullptr;\n\n  SAMPLING_PARAMS_T params_;\n};\n\n#ifdef __CUDACC__\n#include \"sampling_distribution.cu\"\n#endif\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\nconst int SamplingDistribution<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>::CONTROL_DIM;\n\n// template <int C_DIM>\n// const int SamplingParams<C_DIM>::CONTROL_DIM;\n}  // namespace sampling_distributions\n}  // namespace mppi\n"
  },
  {
    "path": "include/mppi/sampling_distributions/smooth-MPPI/smooth-MPPI.cu",
    "content": "/**\n * Created by Bogdan Vlahov on Jan 8, 2024\n **/\n\n#define SMOOTH_MPPI_TEMPLATE template <class CLASS_T, template <int> class PARAMS_TEMPLATE, class DYN_PARAMS_T>\n#define SMOOTH_MPPI_NOISE SmoothMPPIDistributionImpl<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>\n\n#include <mppi/sampling_distributions/smooth-MPPI/smooth-MPPI.cuh>\n#include <mppi/utils/cuda_math_utils.cuh>\n#include <mppi/utils/math_utils.h>\n\nnamespace mppi\n{\nnamespace sampling_distributions\n{\n__global__ void integrateNoise(const float* __restrict__ action_deriv_d, const float* __restrict__ control_mean_d,\n                               float* __restrict__ control_d, const int num_samples, const int num_timesteps,\n                               const int control_dim, const float dt)\n{\n  const int sample_index = blockIdx.x * blockDim.x + threadIdx.x;\n  const int control_index = blockIdx.y * blockDim.y + threadIdx.y;\n  int noise_index = 0, mean_index = 0;\n  if (sample_index < num_samples && control_index < control_dim)\n  {\n    for (int t = 0; t < num_timesteps; t++)\n    {\n      noise_index = (sample_index * num_timesteps + t) * control_dim + control_index;\n      mean_index = t * control_dim + control_index;\n      control_d[noise_index] = control_mean_d[mean_index] + action_deriv_d[noise_index] * dt;\n    }\n  }\n}\n\n__global__ void shiftControlTrajectory(float* __restrict__ control_trajectory_d, const int num_distributions,\n                                       const int num_timesteps, const int control_dim, const int shift_index)\n{\n  extern __shared__ float shared_mean[];\n  const int dist_index = blockIdx.x;\n  const int control_index = threadIdx.x;\n\n  mppi::p1::loadArrayParallel<mppi::p1::Parallel1Dir::THREAD_X>(\n      shared_mean, 0, control_trajectory_d, dist_index * num_timesteps * control_dim, num_timesteps * control_dim);\n  __syncthreads();\n  int p_index, p_step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_X>(p_index, p_step);\n  
for (int t = 0; t < num_timesteps; t++)\n  {\n    int write_index = t * control_dim;\n    int read_index = min(t + shift_index, num_timesteps - 1) * control_dim;\n\n    // if (control_dim % 4 == 0)\n    // {\n    //   for (int i = p_index; i < control_dim / 4; i += 4 * p_step)\n    //   {\n    //     reinterpret_cast<float4*>(&shared_mean[write_index])[i] =\n    //         reinterpret_cast<float4*>(&shared_mean[read_index])[i];\n    //   }\n    // }\n    // else if (control_dim % 2 == 0)\n    // {\n    //   for (int i = p_index; i < control_dim / 2; i += 2 * p_step)\n    //   {\n    //     reinterpret_cast<float2*>(&shared_mean[write_index])[i] =\n    //         reinterpret_cast<float2*>(&shared_mean[read_index])[i];\n    //   }\n    // }\n    // else\n    // {\n    for (int i = 0; i < control_dim; i += 1)\n    {\n      shared_mean[write_index + i] = shared_mean[read_index + i];\n    }\n    // }\n  }\n  __syncthreads();\n  mppi::p1::loadArrayParallel<mppi::p1::Parallel1Dir::THREAD_X>(\n      control_trajectory_d, dist_index * num_timesteps * control_dim, shared_mean, 0, num_timesteps * control_dim);\n}\n\nSMOOTH_MPPI_TEMPLATE\nSMOOTH_MPPI_NOISE::SmoothMPPIDistributionImpl(cudaStream_t stream) : PARENT_CLASS::GaussianDistributionImpl(stream)\n{\n}\n\nSMOOTH_MPPI_TEMPLATE\nSMOOTH_MPPI_NOISE::SmoothMPPIDistributionImpl(const SAMPLING_PARAMS_T& params, cudaStream_t stream)\n  : PARENT_CLASS::GaussianDistributionImpl(params, stream)\n{\n}\n\nSMOOTH_MPPI_TEMPLATE\n__host__ void SMOOTH_MPPI_NOISE::freeCudaMem()\n{\n  if (this->GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaFree(deriv_action_mean_d_));\n    HANDLE_ERROR(cudaFree(deriv_action_noise_d_));\n  }\n  PARENT_CLASS::freeCudaMem();\n}\n\nSMOOTH_MPPI_TEMPLATE\n__host__ void SMOOTH_MPPI_NOISE::allocateCUDAMemoryHelper()\n{\n  PARENT_CLASS::allocateCUDAMemoryHelper();\n  if (this->GPUMemStatus_)\n  {\n    if (deriv_action_noise_d_)\n    {\n      HANDLE_ERROR(cudaFreeAsync(deriv_action_noise_d_, this->stream_));\n    }\n    
HANDLE_ERROR(cudaMallocAsync((void**)&deriv_action_noise_d_,\n                                 sizeof(float) * this->CONTROL_DIM * this->getNumRollouts() *\n                                     this->getNumDistributions() * this->getNumTimesteps(),\n                                 this->stream_));\n    if (deriv_action_mean_d_)\n    {\n      HANDLE_ERROR(cudaFreeAsync(deriv_action_mean_d_, this->stream_));\n    }\n    HANDLE_ERROR(cudaMallocAsync(\n        (void**)&deriv_action_mean_d_,\n        sizeof(float) * this->CONTROL_DIM * this->getNumTimesteps() * this->getNumDistributions(), this->stream_));\n  }\n}\n\nSMOOTH_MPPI_TEMPLATE\n__host__ void SMOOTH_MPPI_NOISE::generateSamples(const int& optimization_stride, const int& iteration_num,\n                                                 curandGenerator_t& gen, bool synchronize)\n{\n  if (this->params_.use_same_noise_for_all_distributions)\n  {\n    HANDLE_CURAND_ERROR(curandGenerateNormal(\n        gen, this->deriv_action_noise_d_, this->getNumTimesteps() * this->getNumRollouts() * CONTROL_DIM, 0.0f, 1.0f));\n    for (int i = 1; i < this->getNumDistributions(); i++)\n    {\n      HANDLE_ERROR(cudaMemcpyAsync(\n          &this->deriv_action_noise_d_[this->getNumRollouts() * this->getNumTimesteps() * CONTROL_DIM * i],\n          this->deriv_action_noise_d_, sizeof(float) * this->getNumRollouts() * this->getNumTimesteps() * CONTROL_DIM,\n          cudaMemcpyDeviceToDevice, this->stream_));\n    }\n  }\n  else\n  {\n    HANDLE_CURAND_ERROR(curandGenerateNormal(\n        gen, this->deriv_action_noise_d_,\n        this->getNumTimesteps() * this->getNumRollouts() * this->getNumDistributions() * CONTROL_DIM, 0.0f, 1.0f));\n  }\n  // Shift derivative action sequence\n  dim3 control_block = dim3(this->CONTROL_DIM, 1, 1);\n  dim3 control_grid = dim3(this->getNumDistributions(), 1, 1);\n  unsigned int shared_mean_size = sizeof(float) * this->CONTROL_DIM * this->getNumTimesteps();\n  shiftControlTrajectory<<<control_grid, 
control_block, shared_mean_size, this->stream_>>>(\n      deriv_action_mean_d_, this->getNumDistributions(), this->getNumTimesteps(), this->CONTROL_DIM,\n      optimization_stride);\n\n  const int BLOCKSIZE_X = this->params_.rewrite_controls_block_dim.x;\n  const int BLOCKSIZE_Y = this->params_.rewrite_controls_block_dim.y;\n  const int BLOCKSIZE_Z = this->params_.rewrite_controls_block_dim.z;\n  /**\n   * Generate noise samples with mean added\n   **/\n  dim3 control_writing_grid;\n  control_writing_grid.x = mppi::math::int_ceil(this->getNumRollouts(), BLOCKSIZE_X);\n  control_writing_grid.y = mppi::math::int_ceil(this->getNumTimesteps(), BLOCKSIZE_Y);\n  control_writing_grid.z = mppi::math::int_ceil(this->getNumDistributions(), BLOCKSIZE_Z);\n  unsigned int std_dev_mem_size = this->getNumDistributions() * CONTROL_DIM;\n  // Allocate shared memory for std_deviations per timestep or constant across the trajectory\n  std_dev_mem_size = mppi::math::nearest_multiple_4(\n      this->params_.time_specific_std_dev ? 
std_dev_mem_size * this->getNumTimesteps() : std_dev_mem_size);\n  unsigned int shared_mem_size =\n      std_dev_mem_size +\n      mppi::math::nearest_multiple_4(this->getNumDistributions() * this->getNumTimesteps() * CONTROL_DIM) +\n      mppi::math::nearest_multiple_4(BLOCKSIZE_X * BLOCKSIZE_Y * BLOCKSIZE_Z * CONTROL_DIM);\n  shared_mem_size *= sizeof(float);\n  setGaussianControls<<<control_writing_grid, this->params_.rewrite_controls_block_dim, shared_mem_size,\n                        this->stream_>>>(\n      this->deriv_action_mean_d_, this->std_dev_d_, this->deriv_action_noise_d_, CONTROL_DIM, this->getNumTimesteps(),\n      this->getNumRollouts(), this->getNumDistributions(), optimization_stride,\n      powf(this->params_.std_dev_decay, iteration_num), this->params_.pure_noise_trajectories_percentage,\n      this->params_.time_specific_std_dev);\n\n  // integrate derivative controls to make true controls\n  dim3 integrate_noise_grid;\n  integrate_noise_grid.x = mppi::math::int_ceil(this->getNumRollouts() * this->getNumDistributions(), BLOCKSIZE_X);\n  integrate_noise_grid.y = 1;\n  integrate_noise_grid.z = 1;\n  dim3 integrate_noise_block(BLOCKSIZE_X, this->CONTROL_DIM, 1);\n  // control_writing_grid.y = mppi::math::int_ceil(this->getNumTimesteps(), BLOCKSIZE_Y);\n  // control_writing_grid.z = mppi::math::int_ceil(this->getNumDistributions(), BLOCKSIZE_Z);\n  // std::cout << \"Integrating samples \" << this->getNumRollouts() * this->getNumDistributions() << std::endl;\n  integrateNoise<<<integrate_noise_grid, integrate_noise_block, 0, this->stream_>>>(\n      this->deriv_action_noise_d_, this->control_means_d_, this->control_samples_d_,\n      this->getNumRollouts() * this->getNumDistributions(), this->getNumTimesteps(), this->CONTROL_DIM,\n      this->params_.dt);\n\n  HANDLE_ERROR(cudaGetLastError());\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n  }\n}\n\nSMOOTH_MPPI_TEMPLATE\n__host__ void 
SMOOTH_MPPI_NOISE::updateDistributionParamsFromDevice(const float* trajectory_weights_d, float normalizer,\n                                                                    const int& distribution_i, bool synchronize)\n{\n  if (distribution_i >= this->getNumDistributions())\n  {\n    std::cerr << \"Updating distributional params for distribution \" << distribution_i << \" out of \"\n              << this->getNumDistributions() << \" total.\" << std::endl;\n    return;\n  }\n  int mean_index = distribution_i * this->getNumTimesteps() * this->CONTROL_DIM;\n  int sample_index = distribution_i * this->getNumRollouts() * this->getNumTimesteps() * this->CONTROL_DIM;\n  float* deriv_action_noise_i_d = &(this->deriv_action_noise_d_[sample_index]);\n  float* deriv_action_mean_i_d = &(this->deriv_action_mean_d_[mean_index]);\n  mppi::kernels::launchWeightedReductionKernel<CONTROL_DIM>(\n      trajectory_weights_d, deriv_action_noise_i_d, deriv_action_mean_i_d, normalizer, this->getNumTimesteps(),\n      this->getNumRollouts(), this->params_.sum_strides, this->stream_, synchronize);\n  dim3 grid(1, 1, 1);\n  dim3 block(1, this->CONTROL_DIM, 1);\n  // std::cout << \"Integrating optimal sequence\" << std::endl;\n  integrateNoise<<<grid, block, 0, this->stream_>>>(deriv_action_mean_i_d, &this->control_means_d_[mean_index],\n                                                    &this->control_samples_d_[sample_index], 1, this->getNumTimesteps(),\n                                                    this->CONTROL_DIM, this->params_.dt);\n  HANDLE_ERROR(cudaMemcpyAsync(&this->control_means_d_[mean_index], &this->control_samples_d_[sample_index],\n                               sizeof(float) * this->getNumTimesteps() * this->CONTROL_DIM, cudaMemcpyDeviceToDevice,\n                               this->stream_));\n  HANDLE_ERROR(cudaMemcpyAsync(&this->means_[mean_index], &this->control_means_d_[mean_index],\n                               sizeof(float) * this->getNumTimesteps() * 
CONTROL_DIM, cudaMemcpyDeviceToHost,\n                               this->stream_));\n  // HANDLE_ERROR(cudaMemcpyAsync(&means_[distribution_i * this->getNumTimesteps() * CONTROL_DIM],\n  // deriv_action_mean_i_d,\n  //                              sizeof(float) * this->getNumTimesteps() * CONTROL_DIM, cudaMemcpyDeviceToHost,\n  //                              this->stream_));\n  if (synchronize)\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n  }\n}\n\n// SMOOTH_MPPI_TEMPLATE\n// __host__ void SMOOTH_MPPI_NOISE::setHostOptimalControlSequence(float* optimal_control_trajectory,\n//                                                             const int& distribution_i, bool synchronize)\n// {\n//   if (distribution_i >= this->getNumDistributions())\n//   {\n//     std::cerr << \"Asking for optimal control sequence from distribution \" << distribution_i << \" out of \"\n//               << this->getNumDistributions() << \" total.\" << std::endl;\n//     return;\n//   }\n\n//   HANDLE_ERROR(cudaMemcpyAsync(\n//       optimal_control_trajectory, &(this->control_means_d_[this->getNumTimesteps() * CONTROL_DIM * distribution_i]),\n//       sizeof(float) * this->getNumTimesteps() * CONTROL_DIM, cudaMemcpyDeviceToHost, this->stream_));\n//   if (synchronize)\n//   {\n//     HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n//   }\n// }\n}  // namespace sampling_distributions\n}  // namespace mppi\n"
  },
  {
    "path": "include/mppi/sampling_distributions/smooth-MPPI/smooth-MPPI.cuh",
    "content": "#pragma once\n/**\n * Created by Bogdan, Jan 8, 2024\n */\n\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n\nnamespace mppi\n{\nnamespace sampling_distributions\n{\n__global__ void integrateNoise(const float* __restrict__ action_deriv_d, const float* __restrict__ control_mean_d,\n                               float* __restrict__ control_d, const int num_samples, const int num_timesteps,\n                               const int control_dim, const float dt);\n__global__ void shiftControlTrajectory(float* __restrict__ control_trajectory_d, const int num_distributions,\n                                       const int num_timesteps, const int control_dim, const int shift_index);\n\ntemplate <int C_DIM, int MAX_DISTRIBUTIONS_T = 2>\nstruct SmoothMPPIParamsImpl : public GaussianParamsImpl<C_DIM, MAX_DISTRIBUTIONS_T>\n{\n  float dt = 0.015f;\n  dim3 shift_trajectory_block;\n  SmoothMPPIParamsImpl(int num_rollouts = 1, int num_timesteps = 1, int num_distributions = 1)\n    : GaussianParamsImpl<C_DIM, MAX_DISTRIBUTIONS_T>(num_rollouts, num_timesteps, num_distributions)\n  {\n  }\n};\n\ntemplate <int C_DIM>\nusing SmoothMPPIParams = SmoothMPPIParamsImpl<C_DIM, 2>;\n\ntemplate <class CLASS_T, template <int> class PARAMS_TEMPLATE = SmoothMPPIParams, class DYN_PARAMS_T = DynamicsParams>\nclass SmoothMPPIDistributionImpl : public GaussianDistributionImpl<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = GaussianDistributionImpl<CLASS_T, PARAMS_TEMPLATE, DYN_PARAMS_T>;\n  using SAMPLING_PARAMS_T = typename PARENT_CLASS::SAMPLING_PARAMS_T;\n  using control_array = typename PARENT_CLASS::control_array;\n\n  static const int CONTROL_DIM = PARENT_CLASS::CONTROL_DIM;\n\n  SmoothMPPIDistributionImpl(cudaStream_t stream = 0);\n  SmoothMPPIDistributionImpl(const SAMPLING_PARAMS_T& params, cudaStream_t stream = 0);\n\n  ~SmoothMPPIDistributionImpl()\n  {\n    freeCudaMem();\n  }\n\n  __host__ virtual std::string 
getSamplingDistributionName() const override\n  {\n    return \"Smooth-MPPI\";\n  }\n\n  __host__ void allocateCUDAMemoryHelper();\n\n  __host__ void freeCudaMem();\n\n  __host__ void generateSamples(const int& optimization_stride, const int& iteration_num, curandGenerator_t& gen,\n                                bool synchronize = true);\n\n  // __host__ void copyImportanceSamplerToDevice(const float* importance_sampler,\n  //                                             const int& distribution_idx, bool synchronize = true);\n  // __host__ void setHostOptimalControlSequence(float* optimal_control_trajectory, const int& distribution_i,\n  //                                             bool synchronize = true);\n\n  __host__ void updateDistributionParamsFromDevice(const float* trajectory_weights_d, float normalizer,\n                                                   const int& distribution_i, bool synchronize = false) override;\n\nprotected:\n  float* deriv_action_mean_d_ = nullptr;\n  float* deriv_action_noise_d_ = nullptr;\n};\n\ntemplate <class DYN_PARAMS_T>\nclass SmoothMPPIDistribution\n  : public SmoothMPPIDistributionImpl<SmoothMPPIDistribution<DYN_PARAMS_T>, SmoothMPPIParams, DYN_PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = SmoothMPPIDistributionImpl<SmoothMPPIDistribution, SmoothMPPIParams, DYN_PARAMS_T>;\n  using SAMPLING_PARAMS_T = typename PARENT_CLASS::SAMPLING_PARAMS_T;\n\n  SmoothMPPIDistribution(cudaStream_t stream = 0) : PARENT_CLASS(stream)\n  {\n  }\n  SmoothMPPIDistribution(const SAMPLING_PARAMS_T& params, cudaStream_t stream = 0) : PARENT_CLASS(params, stream)\n  {\n  }\n};\n}  // namespace sampling_distributions\n}  // namespace mppi\n\n#ifdef __CUDACC__\n#include \"smooth-MPPI.cu\"\n#endif\n"
  },
  {
    "path": "include/mppi/shaping_functions/CEM/cem_shaping_function.cu",
    "content": "template <class CLASS_T, class PARAMS_T, int NUM_ROLLOUTS, int BDIM_X>\n__host__ __device__ float CEMShapingFunctionImpl<CLASS_T, PARAMS_T, NUM_ROLLOUTS, BDIM_X>::computeWeight(\n    float* traj_cost, float baseline, int global_idx)\n{\n  if (traj_cost[global_idx] >= baseline)\n  {\n    return (1.0 / ((int)(NUM_ROLLOUTS) * this->params_.gamma + 1));\n  }\n  else\n  {\n    return 0.0;\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T, int NUM_ROLLOUTS, int BDIM_X>\nvoid CEMShapingFunctionImpl<CLASS_T, PARAMS_T, NUM_ROLLOUTS, BDIM_X>::computeWeights(cost_traj& trajectory_costs,\n                                                                                     float* trajectory_costs_d,\n                                                                                     cudaStream_t stream)\n{\n  // Copy the costs back to the host on the caller-provided stream so the synchronize below covers it\n  HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs.data(), trajectory_costs_d, NUM_ROLLOUTS * sizeof(float),\n                               cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  std::vector<int> indexes(NUM_ROLLOUTS);\n  std::iota(indexes.begin(), indexes.end(), 0);\n\n  std::nth_element(indexes.begin(), indexes.begin() + (int)(NUM_ROLLOUTS * this->params_.gamma), indexes.end(),\n                   [&trajectory_costs](int i1, int i2) { return trajectory_costs(i1) > trajectory_costs(i2); });\n\n  this->baseline_ = trajectory_costs(indexes[(int)(NUM_ROLLOUTS * this->params_.gamma)]);\n\n  // launch the WeightKernel to calculate the weights\n  if (this->params_.gamma == 0)\n  {\n    trajectory_costs(indexes[0]) = 1.0;\n    for (int i = 1; i < NUM_ROLLOUTS; i++)\n    {\n      trajectory_costs(indexes[i]) = 0.0;\n    }\n  }\n  else\n  {\n    CLASS_T* derived = static_cast<CLASS_T*>(this);\n    derived->launchWeightKernel(trajectory_costs_d, this->baseline_, stream);\n\n    HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs.data(), trajectory_costs_d, NUM_ROLLOUTS * sizeof(float),\n   
                              cudaMemcpyDeviceToHost, stream));\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n}\n"
  },
  {
    "path": "include/mppi/shaping_functions/CEM/cem_shaping_function.cuh",
    "content": "#ifndef MPPIGENERIC_CEM_SHAPING_FUNCTION_CUH\n#define MPPIGENERIC_CEM_SHAPING_FUNCTION_CUH\n\n#include <mppi/core/mppi_common.cuh>\n#include <mppi/utils/managed.cuh>\n#include <mppi/shaping_functions/shaping_function.cuh>\n\nstruct CEMShapingFunctionParams\n{\n  float gamma;  // represents the elite sample percentage\n};\n\ntemplate <class CLASS_T, class PARAMS_T, int NUM_ROLLOUTS, int BDIM_X>\nclass CEMShapingFunctionImpl : public ShapingFunctionImpl<CLASS_T, PARAMS_T, NUM_ROLLOUTS, BDIM_X>\n{\npublic:\n  typedef Eigen::Matrix<float, NUM_ROLLOUTS, 1> cost_traj;\n\n  CEMShapingFunctionImpl()\n  {\n    this->normalizer_ = 1.0;\n  }\n\n  __host__ __device__ float computeWeight(float* traj_cost, float baseline, int global_idx);\n\n  void computeWeights(cost_traj& trajectory_costs, float* trajectory_costs_d, cudaStream_t stream = nullptr);\n\n  /*\n   * TODO what should this be for CEM?\n  void computeFreeEnergy(float& free_energy, float& free_energy_var,\n                         float& free_energy_modified,\n                         float* cost_rollouts_host, float baseline);\n                         */\n};\n\ntemplate <int NUM_ROLLOUTS, int BDIM_X>\nclass CEMShapingFunction : public CEMShapingFunctionImpl<CEMShapingFunction<NUM_ROLLOUTS, BDIM_X>,\n                                                         CEMShapingFunctionParams, NUM_ROLLOUTS, BDIM_X>\n{\npublic:\n};\n\n#if __CUDACC__\n#include \"cem_shaping_function.cu\"\n#endif\n\n#endif  // MPPIGENERIC_CEM_SHAPING_FUNCTION_CUH\n"
  },
  {
    "path": "include/mppi/shaping_functions/shaping_function.cu",
    "content": "#include \"shaping_function.cuh\"\n#include <mppi/utils/math_utils.h>\nnamespace mppi_common\n{\ntemplate <class CLASS_T, int NUM_ROLLOUTS>\n__global__ void weightKernel(float* trajectory_costs_d, float baseline, CLASS_T* shape_function)\n{\n  int global_idx = (blockDim.x * blockIdx.x + threadIdx.x) * blockDim.z + threadIdx.z;\n\n  if (global_idx < NUM_ROLLOUTS * blockDim.z)\n  {\n    trajectory_costs_d[global_idx] = shape_function->computeWeight(trajectory_costs_d, baseline, global_idx);\n  }\n}\n}  // namespace mppi_common\n\ntemplate <class CLASS_T, class PARAMS_T, int NUM_ROLLOUTS, int BDIM_X>\nvoid ShapingFunctionImpl<CLASS_T, PARAMS_T, NUM_ROLLOUTS, BDIM_X>::paramsToDevice()\n{\n  if (GPUMemStatus_)\n  {\n    HANDLE_ERROR(\n        cudaMemcpyAsync(&shaping_function_d_->params_, &params_, sizeof(PARAMS_T), cudaMemcpyHostToDevice, stream_));\n    HANDLE_ERROR(cudaStreamSynchronize(stream_));\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T, int NUM_ROLLOUTS, int BDIM_X>\nvoid ShapingFunctionImpl<CLASS_T, PARAMS_T, NUM_ROLLOUTS, BDIM_X>::freeCudaMem()\n{\n  if (GPUMemStatus_)\n  {\n    cudaFree(shaping_function_d_);\n    GPUMemStatus_ = false;\n    shaping_function_d_ = nullptr;\n  }\n}\n\ntemplate <class CLASS_T, class PARAMS_T, int NUM_ROLLOUTS, int BDIM_X>\nvoid ShapingFunctionImpl<CLASS_T, PARAMS_T, NUM_ROLLOUTS, BDIM_X>::GPUSetup()\n{\n  CLASS_T* derived = static_cast<CLASS_T*>(this);\n  if (!GPUMemStatus_)\n  {\n    shaping_function_d_ = Managed::GPUSetup<CLASS_T>(derived);\n  }\n  else\n  {\n    std::cout << \"GPU Memory already set\" << std::endl;  // TODO should this be an exception?\n  }\n  derived->paramsToDevice();\n}\n\ntemplate <class CLASS_T, class PARAMS_T, int NUM_ROLLOUTS, int BDIM_X>\n__host__ __device__ float ShapingFunctionImpl<CLASS_T, PARAMS_T, NUM_ROLLOUTS, BDIM_X>::computeWeight(float* traj_cost,\n                                                                                                      float baseline,\n      
                                                                                                int global_idx)\n{\n  return expf(-this->params_.lambda_inv * (traj_cost[global_idx] - baseline));\n}\n\ntemplate <class CLASS_T, class PARAMS_T, int NUM_ROLLOUTS, int BDIM_X>\nvoid ShapingFunctionImpl<CLASS_T, PARAMS_T, NUM_ROLLOUTS, BDIM_X>::computeWeights(cost_traj& trajectory_costs,\n                                                                                  float* trajectory_costs_d,\n                                                                                  cudaStream_t stream)\n{\n  // Copy the costs back to the host on the caller-provided stream so the synchronize below covers it\n  HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs.data(), trajectory_costs_d, NUM_ROLLOUTS * sizeof(float),\n                               cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  baseline_ = trajectory_costs.minCoeff();\n\n  // launch the WeightKernel to calculate the weights\n  CLASS_T* derived = static_cast<CLASS_T*>(this);\n  derived->launchWeightKernel(trajectory_costs_d, baseline_, stream);\n\n  HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs.data(), trajectory_costs_d, NUM_ROLLOUTS * sizeof(float),\n                               cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  normalizer_ = trajectory_costs.sum();\n\n  // TODO after this you should grab the baseline value and the normalizer\n}\n\ntemplate <class CLASS_T, class PARAMS_T, int NUM_ROLLOUTS, int BDIM_X>\nvoid ShapingFunctionImpl<CLASS_T, PARAMS_T, NUM_ROLLOUTS, BDIM_X>::launchWeightKernel(float* trajectory_costs_d,\n                                                                                      float baseline,\n                                                                                      cudaStream_t stream)\n{\n  dim3 dimBlock(BDIM_X, 1, 1);\n  dim3 dimGrid((NUM_ROLLOUTS - 1) / BDIM_X + 1, 1, 1);\n  // Use the computed launch configuration and the caller's stream (a single block of NUM_ROLLOUTS\n  // threads would exceed the per-block thread limit for large NUM_ROLLOUTS and ran on the default stream)\n  mppi_common::weightKernel<CLASS_T, NUM_ROLLOUTS>\n      <<<dimGrid, dimBlock, 0, stream>>>(trajectory_costs_d, baseline, this->shaping_function_d_);\n  CudaCheckError();\n}\n\ntemplate <class CLASS_T, class PARAMS_T, int NUM_ROLLOUTS, int BDIM_X>\nvoid ShapingFunctionImpl<CLASS_T, PARAMS_T, NUM_ROLLOUTS, BDIM_X>::computeFreeEnergy(\n    float& free_energy, float& free_energy_var, float& free_energy_modified, float* cost_rollouts_host, float baseline)\n{\n  float var = 0;\n  float norm = 0;\n  for (int i = 0; i < NUM_ROLLOUTS; i++)\n  {\n    norm += cost_rollouts_host[i];\n    var += SQ(cost_rollouts_host[i]);\n  }\n  norm /= NUM_ROLLOUTS;\n  free_energy = -1.0f / this->params_.lambda_inv * logf(norm) + baseline;\n  free_energy_var = 1.0f / this->params_.lambda_inv * (var / NUM_ROLLOUTS - SQ(norm));\n  // TODO Figure out the point of the following lines\n  float weird_term = free_energy_var / (norm * sqrtf(1.0 * NUM_ROLLOUTS));\n  free_energy_modified = 1.0f / this->params_.lambda_inv * (weird_term + 0.5 * SQ(weird_term));\n}\n"
  },
  {
    "path": "include/mppi/shaping_functions/shaping_function.cuh",
    "content": "#ifndef MPPIGENERIC_SHAPING_FUNCTION_CUH\n#define MPPIGENERIC_SHAPING_FUNCTION_CUH\n\n#include <mppi/core/mppi_common.cuh>\n#include <mppi/utils/managed.cuh>\n\nnamespace mppi_common\n{\ntemplate <class CLASS_T, int NUM_ROLLOUTS>\n__global__ void weightKernel(float* trajectory_costs_d, float baseline, CLASS_T* shape_function);\n}\n\nstruct ShapingFunctionParams\n{\n  float lambda_inv = 1.0;  // also known as gamma\n};\n\ntemplate <class CLASS_T, class PARAMS_T, int NUM_ROLLOUTS, int BDIM_X>\nclass ShapingFunctionImpl : public Managed\n{\npublic:\n  typedef Eigen::Matrix<float, NUM_ROLLOUTS, 1> cost_traj;\n\n  ShapingFunctionImpl()\n  {\n    paramsToDevice();\n  }\n\n  __host__ __device__ float computeWeight(float* traj_cost, float baseline, int global_idx);\n\n  void computeWeights(cost_traj& trajectory_costs, float* trajectory_costs_d, cudaStream_t stream = nullptr);\n\n  void launchWeightKernel(float* trajectory_costs_d, float baseline, cudaStream_t stream = nullptr);\n\n  void computeFreeEnergy(float& free_energy, float& free_energy_var, float& free_energy_modified,\n                         float* cost_rollouts_host, float baseline);\n\n  PARAMS_T getParams()\n  {\n    return params_;\n  }\n  float getBaseline()\n  {\n    return baseline_;\n  }\n  float getNormalizer()\n  {\n    return normalizer_;\n  }\n\n  void setParams(const PARAMS_T& params)\n  {\n    this->params_ = params;\n    paramsToDevice();\n  }\n\n  void GPUSetup();\n  void freeCudaMem();\n  void paramsToDevice();\n\n  CLASS_T* shaping_function_d_ = nullptr;\n\nprotected:\n  PARAMS_T params_;\n  float baseline_;\n  float normalizer_;\n};\n\ntemplate <int NUM_ROLLOUTS, int BDIM_X>\nclass ShapingFunction\n  : public ShapingFunctionImpl<ShapingFunction<NUM_ROLLOUTS, BDIM_X>, ShapingFunctionParams, NUM_ROLLOUTS, BDIM_X>\n{\npublic:\n};\n\n#if __CUDACC__\n#include \"shaping_function.cu\"\n#endif\n\n#endif  // MPPIGENERIC_SHAPING_FUNCTION_CUH\n"
  },
  {
    "path": "include/mppi/utils/activation_functions.cuh",
    "content": "#ifndef ACTIVATION_FUNCTIONS_CUH_\n#define ACTIVATION_FUNCTIONS_CUH_\n\n#include \"math_utils.h\"\nnamespace mppi\n{\nnamespace nn\n{\n/**\n * There is hardware support for the tanh in some GPUs, so the hand written version is slower\n * Checked on a 3080 and 1050ti\n * @param input\n * @return\n */\ninline __host__ __device__ float tanh(float input)\n{\n  // There is hardware support for tanh starting in Turing (above 750+) that makes this approximation slower\n// #if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 750)\n//   const float num = __expf(2.0f * input);\n//   // return __fdiv_rz(num - 1.0f, num + 1.0f);\n//   return (num - 1.0f) / (num + 1.0f);\n// #else\n//   return tanhf(input);\n// #endif\n  return tanhf(input);\n}\n\ninline __host__ __device__ float tanh_accurate(float input)\n{\n  return tanhf(input);\n}\n\ninline __host__ __device__ float tanh_deriv(float input)\n{\n  return 1.0f - SQ(mppi::nn::tanh(input));\n}\n\ninline __host__ __device__ float relu(float input)\n{\n  return fmaxf(0.0f, input);\n}\n\n/**\n * Uses hardware support for tanh function, should be faster\n * Tested on 3080 and 1050ti\n * @param input\n * @return\n */\ninline __host__ __device__ float sigmoid(float input)\n{\n#ifdef __CUDA_ARCH__\n  // these three are roughly equivalent on 3080\n  // return __fmul_rz(1.0f + tanhf(__fmul_rz(input, 0.5f)), 0.5f);\n  // return __fmaf_rz(tanhf(__fmul_rz(input, 0.5f)), 0.5f, 0.5f);\n  return (1.0f + mppi::nn::tanh(input / 2.0f)) / 2.0f;\n#else\n  return (1.0f / (1.0f + expf(-input)));\n#endif\n}\n\ninline __host__ __device__ float sigmoid_accurate(float input)\n{\n  return (1.0f / (1.0f + expf(-input)));\n}\n\n/**\n *\n * @param input\n * @return\n */\n__host__ __device__ static inline float tanh_vel_scale(float state, float vel, const float constants[2])\n{\n  return state * constants[1] * mppi::nn::tanh(vel * constants[0]);\n}\n\n/**\n *\n * @param input\n * @return\n */\n__host__ __device__ static inline float 
tanh_scale(float state, const float constants[2])\n{\n  return constants[1] * mppi::nn::tanh(state * constants[0]);\n}\n\n/**\n *\n * @param x\n * @return\n */\n__host__ __device__ static inline float tanhshrink(float x)\n{\n  return x - mppi::nn::tanh(x);\n}\n\n/**\n *\n * @param x\n * @return\n */\n__host__ __device__ static inline float tanhshrink_scale(float x, float scale)\n{\n  return scale * tanhshrink(x / scale);\n}\n\n}  // namespace nn\n}  // namespace mppi\n\n#endif  // ACTIVATION_FUNCTIONS_CUH_\n"
  },
  {
    "path": "include/mppi/utils/angle_utils.cuh",
    "content": "#ifndef ANGLES_CUH_\n#define ANGLES_CUH_\n\n#include <cuda_runtime.h>\n#include <cmath>\n\nnamespace angle_utils\n{\n/**\n *\n * @param angle\n * @return\n */\n__host__ __device__ static inline double normalizeAngle(double angle)\n{\n  const double result = fmod(angle + M_PI, 2.0 * M_PI);\n  if (result <= 0.0)\n    return result + M_PI;\n  return result - M_PI;\n}\n// float version might not be exact\n// different systems will have slightly different values.\n__host__ __device__ static inline float normalizeAngle(float angle)\n{\n  const float result = fmodf(angle + static_cast<float>(M_PI), static_cast<float>(2.0 * M_PI));\n  if (result <= 0.0f)\n    return result + static_cast<float>(M_PI);\n  return result - static_cast<float>(M_PI);\n}\n\n/**\n *\n * @param from\n * @param to\n * @return\n */\n__host__ __device__ static inline double shortestAngularDistance(double from, double to)\n{\n  return normalizeAngle(to - from);\n}\n__host__ __device__ static inline float shortestAngularDistance(float from, float to)\n{\n  return normalizeAngle(to - from);\n}\n\n/**\n * Does a linear interpolation of the euler angle while respecting -pi to pi wrapping\n * solution from https://www.ri.cmu.edu/pub_files/pub4/kuffner_james_2004_1/kuffner_james_2004_1.pdf algorithm 6\n * @param angle_1\n * @param angle_2\n * @param alpha\n * @return\n */\n__host__ __device__ static inline double interpolateEulerAngleLinear(double angle_1, double angle_2, double alpha)\n{\n  double angle_diff = shortestAngularDistance(angle_1, angle_2);\n  return normalizeAngle(angle_1 + alpha * angle_diff);\n}\n__host__ __device__ static inline float interpolateEulerAngleLinear(float angle_1, float angle_2, float alpha)\n{\n  float angle_diff = shortestAngularDistance(angle_1, angle_2);\n  return normalizeAngle(angle_1 + alpha * angle_diff);\n}\n}  // namespace angle_utils\n\n#endif  // ANGLES_CUH_\n"
  },
  {
    "path": "include/mppi/utils/cuda_math_utils.cuh",
    "content": "//\n// Created by jason on 4/25/22.\n//\n\n#ifndef MPPIGENERIC_CUDA_MATH_UTILS_CUH\n#define MPPIGENERIC_CUDA_MATH_UTILS_CUH\n\n#include <cassert>\n\n#ifndef __UNROLL\n#define __xstr__(s) __str__(s)\n#define __str__(s) #s\n#ifdef __CUDACC__\n#define __UNROLL(a) _Pragma(\"unroll\")\n#elif defined(__GNUC__)  // GCC is the compiler and uses different unroll syntax\n#define __UNROLL(a) _Pragma(__xstr__(GCC unroll a))\n#endif\n#endif\n\n// Matching float4 syntax\ntemplate <class T = float>\nstruct __align__(4 * sizeof(T)) type4\n{\n  T x;\n  T y;\n  T z;\n  T w;\n  // Allow writing to struct using array index\n  __host__ __device__ T& operator[](int i)\n  {\n    assert(i >= 0);\n    assert(i < 4);\n    return (i > 1) ? ((i == 2) ? z : w) : ((i == 0) ? x : y);\n  }\n  // Allow reading from struct using array index\n  __host__ __device__ const T& operator[](int i) const\n  {\n    assert(i >= 0);\n    assert(i < 4);\n    return (i > 1) ? ((i == 2) ? z : w) : ((i == 0) ? x : y);\n  }\n};\n\ntemplate <class T = float>\nstruct __align__(2 * sizeof(T)) type2\n{\n  T x;\n  T y;\n\n  // Allow writing to struct using array index\n  __host__ __device__ T& operator[](int i)\n  {\n    assert(i >= 0);\n    assert(i < 2);\n    return (i == 0) ? x : y;\n  }\n  // Allow reading from struct using array index\n  __host__ __device__ const T& operator[](int i) const\n  {\n    assert(i >= 0);\n    assert(i < 2);\n    return (i == 0) ? 
x : y;\n  }\n};\n\n// Scalar-Vector Multiplication\n__host__ __device__ inline float2 operator*(const float2& a, const float& b)\n{\n  return make_float2(a.x * b, a.y * b);\n}\n__host__ __device__ inline float3 operator*(const float3& a, const float& b)\n{\n  return make_float3(a.x * b, a.y * b, a.z * b);\n}\n__host__ __device__ inline float4 operator*(const float4& a, const float& b)\n{\n  return make_float4(a.x * b, a.y * b, a.z * b, a.w * b);\n}\n\n__host__ __device__ inline float2 operator*(const float& b, const float2& a)\n{\n  return make_float2(a.x * b, a.y * b);\n}\n__host__ __device__ inline float3 operator*(const float& b, const float3& a)\n{\n  return make_float3(a.x * b, a.y * b, a.z * b);\n}\n__host__ __device__ inline float4 operator*(const float& b, const float4& a)\n{\n  return make_float4(a.x * b, a.y * b, a.z * b, a.w * b);\n}\n\n// Scalar-Vector Addition\n__host__ __device__ inline float2 operator+(const float2& a, const float& b)\n{\n  return make_float2(a.x + b, a.y + b);\n}\n__host__ __device__ inline float3 operator+(const float3& a, const float& b)\n{\n  return make_float3(a.x + b, a.y + b, a.z + b);\n}\n__host__ __device__ inline float4 operator+(const float4& a, const float& b)\n{\n  return make_float4(a.x + b, a.y + b, a.z + b, a.w + b);\n}\n\n// Scalar-Vector Subtraction\n__host__ __device__ inline float2 operator-(const float2& a, const float& b)\n{\n  return make_float2(a.x - b, a.y - b);\n}\n__host__ __device__ inline float3 operator-(const float3& a, const float& b)\n{\n  return make_float3(a.x - b, a.y - b, a.z - b);\n}\n__host__ __device__ inline float4 operator-(const float4& a, const float& b)\n{\n  return make_float4(a.x - b, a.y - b, a.z - b, a.w - b);\n}\n\n// Scalar-Vector Dvision\n__host__ __device__ inline float2 operator/(const float2& a, const float& b)\n{\n  return make_float2(a.x / b, a.y / b);\n}\n__host__ __device__ inline float3 operator/(const float3& a, const float& b)\n{\n  return make_float3(a.x / b, a.y / b, a.z 
/ b);\n}\n__host__ __device__ inline float4 operator/(const float4& a, const float& b)\n{\n  return make_float4(a.x / b, a.y / b, a.z / b, a.w / b);\n}\n\n// Vector-Vector Addition\n__host__ __device__ inline float2 operator+(const float2& a, const float2& b)\n{\n  return make_float2(a.x + b.x, a.y + b.y);\n}\n__host__ __device__ inline float3 operator+(const float3& a, const float3& b)\n{\n  return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);\n}\n__host__ __device__ inline float4 operator+(const float4& a, const float4& b)\n{\n  return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);\n}\n\n// Vector-Vector Subtraction\n__host__ __device__ inline float2 operator-(const float2& a, const float2& b)\n{\n  return make_float2(a.x - b.x, a.y - b.y);\n}\n\n__host__ __device__ inline float3 operator-(const float3& a, const float3& b)\n{\n  return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);\n}\n\n__host__ __device__ inline float4 operator-(const float4& a, const float4& b)\n{\n  return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);\n}\n\n// Vector-Vector Multiplication\n__host__ __device__ inline float2 operator*(const float2& a, const float2& b)\n{\n  return make_float2(a.x * b.x, a.y * b.y);\n}\n\n__host__ __device__ inline float3 operator*(const float3& a, const float3& b)\n{\n  return make_float3(a.x * b.x, a.y * b.y, a.z * b.z);\n}\n\n__host__ __device__ inline float4 operator*(const float4& a, const float4& b)\n{\n  return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);\n}\n\n// Vector-Vector Division\n__host__ __device__ inline float2 operator/(const float2& a, const float2& b)\n{\n  return make_float2(a.x / b.x, a.y / b.y);\n}\n\n__host__ __device__ inline float3 operator/(const float3& a, const float3& b)\n{\n  return make_float3(a.x / b.x, a.y / b.y, a.z / b.z);\n}\n\n__host__ __device__ inline float4 operator/(const float4& a, const float4& b)\n{\n  return make_float4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w);\n}\n\n// 
Scalar-Vector multiply and set\n__host__ __device__ inline float2& operator*=(float2& a, const float& b)\n{\n  a.x *= b;\n  a.y *= b;\n  return a;\n}\n__host__ __device__ inline float3& operator*=(float3& a, const float& b)\n{\n  a.x *= b;\n  a.y *= b;\n  a.z *= b;\n  return a;\n}\n__host__ __device__ inline float4& operator*=(float4& a, const float& b)\n{\n  a.x *= b;\n  a.y *= b;\n  a.z *= b;\n  a.w *= b;\n  return a;\n}\n\n// Vector-Vector add and set\n__host__ __device__ inline float2& operator+=(float2& a, const float2& b)\n{\n  a.x += b.x;\n  a.y += b.y;\n  return a;\n}\n\n__host__ __device__ inline float3& operator+=(float3& a, const float3& b)\n{\n  a.x += b.x;\n  a.y += b.y;\n  a.z += b.z;\n  return a;\n}\n__host__ __device__ inline float4& operator+=(float4& a, const float4& b)\n{\n  a.x += b.x;\n  a.y += b.y;\n  a.z += b.z;\n  a.w += b.w;\n  return a;\n}\n\n__host__ __device__ inline float dot(const float2& a, const float2& b)\n{\n  return a.x * b.x + a.y * b.y;\n}\n\n__host__ __device__ inline float cross(const float2& a, const float2& b)\n{\n  return a.x * b.y - a.y * b.x;\n}\n\n__host__ __device__ inline float norm(const float2& a)\n{\n  return sqrtf(dot(a, a));\n}\n\n__host__ __device__ inline float dot(const float3& a, const float3& b)\n{\n  return a.x * b.x + a.y * b.y + a.z * b.z;\n}\n\n__host__ __device__ inline float norm(const float3& a)\n{\n  return sqrtf(dot(a, a));\n}\n\n__host__ __device__ inline float dot(const float4& a, const float4& b)\n{\n  return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;\n}\n\n__host__ __device__ inline float norm(const float4& a)\n{\n  return sqrtf(dot(a, a));\n}\n\n__host__ __device__ inline float2 operator-(const float2& a)\n{\n  return make_float2(-a.x, -a.y);\n}\n\n__host__ __device__ inline float3 operator-(const float3& a)\n{\n  return make_float3(-a.x, -a.y, -a.z);\n}\n\n__host__ __device__ inline float4 operator-(const float4& a)\n{\n  return make_float4(-a.x, -a.y, -a.z, -a.w);\n}\n\n__host__ __device__ 
inline bool operator==(const float2& lhs, const float2& rhs)\n{\n  return (lhs.x == rhs.x) && (lhs.y == rhs.y);\n}\n\n__host__ __device__ inline bool operator==(const float3& lhs, const float3& rhs)\n{\n  return (lhs.x == rhs.x) && (lhs.y == rhs.y) && (lhs.z == rhs.z);\n}\n\n__host__ __device__ inline bool operator==(const float4& lhs, const float4& rhs)\n{\n  return (lhs.x == rhs.x) && (lhs.y == rhs.y) && (lhs.z == rhs.z) && (lhs.w == rhs.w);\n}\n\n// Create different data types given the same input. Needed for handling border values in cudaTextures with Border\n// adress mode\ntemplate <class DATA_T>\ninline __host__ __device__ DATA_T createPartialCudaTuple(const float& r, const float& g, const float& b,\n                                                         const float& a);\n\ntemplate <>\ninline __host__ __device__ float createPartialCudaTuple<float>(const float& r, const float& g, const float& b,\n                                                               const float& a)\n{\n  return r;\n}\n\ntemplate <>\ninline __host__ __device__ float2 createPartialCudaTuple<float2>(const float& r, const float& g, const float& b,\n                                                                 const float& a)\n{\n  return make_float2(r, g);\n}\n\ntemplate <>\ninline __host__ __device__ float3 createPartialCudaTuple<float3>(const float& r, const float& g, const float& b,\n                                                                 const float& a)\n{\n  return make_float3(r, g, b);\n}\n\ntemplate <>\ninline __host__ __device__ float4 createPartialCudaTuple<float4>(const float& r, const float& g, const float& b,\n                                                                 const float& a)\n{\n  return make_float4(r, g, b, a);\n}\n\ntemplate <>\ninline __host__ __device__ int createPartialCudaTuple<int>(const float& r, const float& g, const float& b,\n                                                           const float& a)\n{\n  return 
static_cast<int>(r);\n}\n\ntemplate <>\ninline __host__ __device__ int2 createPartialCudaTuple<int2>(const float& r, const float& g, const float& b,\n                                                             const float& a)\n{\n  return make_int2(static_cast<int>(r), static_cast<int>(g));\n}\n\ntemplate <>\ninline __host__ __device__ int3 createPartialCudaTuple<int3>(const float& r, const float& g, const float& b,\n                                                             const float& a)\n{\n  return make_int3(static_cast<int>(r), static_cast<int>(g), static_cast<int>(b));\n}\n\ntemplate <>\ninline __host__ __device__ int4 createPartialCudaTuple<int4>(const float& r, const float& g, const float& b,\n                                                             const float& a)\n{\n  return make_int4(static_cast<int>(r), static_cast<int>(g), static_cast<int>(b), static_cast<int>(a));\n}\n#endif  // MPPIGENERIC_CUDA_MATH_UTILS_CUH\n"
  },
  {
    "path": "include/mppi/utils/eigen_type_conversions.h",
    "content": "#pragma once\n#include <cuda_runtime.h>\n#include <Eigen/Dense>\n\n__device__ __host__ Eigen::Vector3f cudaToEigen(const float3& v)\n{\n  return Eigen::Vector3f(v.x, v.y, v.z);\n}\n\n__device__ __host__ float3 EigenToCuda(const Eigen::Vector3f& v)\n{\n  return make_float3(v[0], v[1], v[2]);\n}\n"
  },
  {
    "path": "include/mppi/utils/file_utils.h",
    "content": "//\n// Created by jgibson37 on 1/9/20.\n//\n\n#ifndef MPPIGENERIC_FILE_UTILS_H\n#define MPPIGENERIC_FILE_UTILS_H\n\n#include <string>\n#include <unistd.h>\n\ninline bool fileExists(const std::string& name)\n{\n  return (access(name.c_str(), F_OK) != -1);\n}\n\n#endif  // MPPIGENERIC_FILE_UTILS_H\n"
  },
  {
    "path": "include/mppi/utils/gpu_err_chk.cuh",
    "content": "#ifndef CUDA_TESTING_GPU_ERR_CHK_CUH\n#define CUDA_TESTING_GPU_ERR_CHK_CUH\n\n#include <cuda_runtime.h>\n#include <cufft.h>\n#include <curand.h>\n#include <device_launch_parameters.h>  // For block idx and thread idx, etc\n#include <iostream>\n\n#ifndef DEPRECATED\n#if __cplusplus >= 201402L\n#define DEPRECATED [[deprecated]]\n#elif defined(__GNUC__) || defined(__clang__) || defined(__CUDACC__)\n#define DEPRECATED __attribute__((deprecated))\n#elif defined(_MSC_VER)\n#define DEPRECATED __declspec(deprecated)\n#else\n#pragma message(\"WARNING: You need to implement DEPRECATED for this compiler\")\n#define DEPRECATED\n#endif\n#endif\n// #ifndef __DEPRECATED__\n// #if defined(_WIN32)\n// # define __DEPRECATED__(msg) __declspec(deprecated(msg))\n// #elif (defined(__GNUC__) && (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 5 && !defined(__clang__))))\n// # define __DEPRECATED__(msg) __attribute__((deprecated))\n// #else\n// # define __DEPRECATED__(msg) __attribute__((deprecated(msg)))\n// #endif\n// #endif\n\ninline void gpuAssert(cudaError_t code, const char* file, int line, bool abort = true)\n{\n  if (code != cudaSuccess)\n  {\n    fprintf(stderr, \"GPUassert: %s %s %d\\n\", cudaGetErrorString(code), file, line);\n    if (abort)\n      exit(code);\n  }\n}\n\ninline void __cudaCheckError(const char* file, const int line)\n{\n  cudaError err = cudaGetLastError();\n  if (cudaSuccess != err)\n  {\n    fprintf(stderr, \"cudaCheckError() failed at %s:%i : %s\\n\", file, line, cudaGetErrorString(err));\n    exit(-1);\n  }\n\n  // More careful checking. 
However, this will affect performance.\n  // Comment away if needed.\n  err = cudaDeviceSynchronize();\n  if (cudaSuccess != err)\n  {\n    fprintf(stderr, \"cudaCheckError() with sync failed at %s:%i : %s\\n\", file, line, cudaGetErrorString(err));\n    exit(-1);\n  }\n}\n\ninline const char* cufftGetErrorString(cufftResult& code)\n{\n  // Codes from https://docs.nvidia.com/cuda/cufft/index.html#cufftresult\n  switch (code)\n  {\n    case CUFFT_SUCCESS:\n      return \"Success\";\n    case CUFFT_INVALID_PLAN:\n      return \"cuFFT was passed an invalid plan handle\";\n    case CUFFT_ALLOC_FAILED:\n      return \"cuFFT failed to allocate GPU or CPU memory\";\n    case CUFFT_INVALID_VALUE:\n      return \"User specified an invalid pointer or parameter\";\n    case CUFFT_INTERNAL_ERROR:\n      return \"Driver or internal cuFFT library error\";\n    case CUFFT_EXEC_FAILED:\n      return \"Failed to execute an FFT on the GPU\";\n    case CUFFT_SETUP_FAILED:\n      return \"The cuFFT library failed to initialize\";\n    case CUFFT_INVALID_SIZE:\n      return \"User specified an invalid transform size\";\n    case CUFFT_UNALIGNED_DATA:\n      return \"No longer used but unaligned data\";\n    case CUFFT_INVALID_DEVICE:\n      return \"Execution of a plan was on different GPU than plan creation\";\n    case CUFFT_NO_WORKSPACE:\n      return \"No workspace has been provided prior to plan execution\";\n    case CUFFT_NOT_IMPLEMENTED:\n      return \"Function does not implement functionality for parameters given.\";\n#if CUDART_VERSION < 13000\n    case CUFFT_INCOMPLETE_PARAMETER_LIST:\n      return \"Missing parameters in call\";\n    case CUFFT_PARSE_ERROR:\n      return \"Internal plan database error\";\n    case CUFFT_LICENSE_ERROR:\n      return \"License Error. 
Used in previous versions\";\n#else\n    case CUFFT_MISSING_DEPENDENCY:\n      return \"cuFFT is unable to find a dependency\";\n    case CUFFT_NVRTC_FAILURE:\n      return \"An NVRTC failure was encountered during a cuFFT operation\";\n    case CUFFT_NVJITLINK_FAILURE:\n      return \"An nvJitLink failure was encountered during a cuFFT operation\";\n    case CUFFT_NVSHMEM_FAILURE:\n      return \"An NVSHMEM failure was encountered during a cuFFT operation\";\n#endif\n    case CUFFT_NOT_SUPPORTED:\n      return \"Operation is not supported for parameters given.\";\n    default:\n      return \"cuFFT ERROR\";\n  }\n}\n\ninline const char* curandGetErrorString(curandStatus_t code)\n{\n  // Codes from https://docs.nvidia.com/cuda/curand/group__HOST.html#group__HOST_1gb94a31d5c165858c96b6c18b70644437\n  switch (code)\n  {\n    case CURAND_STATUS_SUCCESS:\n      return \"No errors.\";\n    case CURAND_STATUS_VERSION_MISMATCH:\n      return \"Header file and linked library version do not match.\";\n    case CURAND_STATUS_NOT_INITIALIZED:\n      return \"Generator not initialized.\";\n    case CURAND_STATUS_ALLOCATION_FAILED:\n      return \"Memory allocation failed.\";\n    case CURAND_STATUS_TYPE_ERROR:\n      return \"Generator is wrong type.\";\n    case CURAND_STATUS_OUT_OF_RANGE:\n      return \"Argument out of range.\";\n    case CURAND_STATUS_LENGTH_NOT_MULTIPLE:\n      return \"Length requested is not a multple of dimension.\";\n    case CURAND_STATUS_DOUBLE_PRECISION_REQUIRED:\n      return \"GPU does not have double precision required by MRG32k3a.\";\n    case CURAND_STATUS_LAUNCH_FAILURE:\n      return \"Kernel launch failure.\";\n    case CURAND_STATUS_PREEXISTING_FAILURE:\n      return \"Preexisting failure on library entry.\";\n    case CURAND_STATUS_INITIALIZATION_FAILED:\n      return \"Initialization of CUDA failed.\";\n    case CURAND_STATUS_ARCH_MISMATCH:\n      return \"Architecture mismatch, GPU does not support requested feature.\";\n    case 
CURAND_STATUS_INTERNAL_ERROR:\n      return \"Internal library error.\";\n    default:\n      return \"Cureand Error\";\n  }\n}\n\ninline void cufftAssert(cufftResult code, const char* file, int line, bool abort = true)\n{\n  if (code != CUFFT_SUCCESS)\n  {\n    fprintf(stderr, \"CUFFTassert: %s %s %d\\n\", cufftGetErrorString(code), file, line);\n    if (abort)\n    {\n      exit(code);\n    }\n  }\n}\n\ninline void curandAssert(curandStatus_t code, const char* file, int line, bool abort = true)\n{\n  if (code != CURAND_STATUS_SUCCESS)\n  {\n    fprintf(stderr, \"Curandassert: %s %s %d\\n\", curandGetErrorString(code), file, line);\n    if (abort)\n    {\n      exit(code);\n    }\n  }\n}\n\n#define CudaCheckError() __cudaCheckError(__FILE__, __LINE__)\n#define HANDLE_ERROR(ans)                                                                                              \\\n  {                                                                                                                    \\\n    gpuAssert((ans), __FILE__, __LINE__);                                                                              \\\n  }\n\n#define HANDLE_CUFFT_ERROR(ans)                                                                                        \\\n  {                                                                                                                    \\\n    cufftAssert((ans), __FILE__, __LINE__);                                                                            \\\n  }\n\n#define HANDLE_CURAND_ERROR(ans)                                                                                       \\\n  {                                                                                                                    \\\n    curandAssert((ans), __FILE__, __LINE__);                                                                           \\\n  }\n\n#endif  //! CUDA_TESTING_GPU_ERR_CHK_CUH\n"
  },
  {
    "path": "include/mppi/utils/logger.hpp",
    "content": "#pragma once\n/**\n * Created by Bogdan on 11/01/2023\n */\n\n#include <cstdio>\n#include <memory>\n#include <string>\n#include <vector>\n\nnamespace mppi\n{\nnamespace util\n{\nenum class LOG_LEVEL : int\n{\n  DEBUG = 0,\n  INFO,\n  WARNING,\n  ERROR,\n  NONE\n};\n\nstatic LOG_LEVEL GLOBAL_LOG_LEVEL = LOG_LEVEL::WARNING;\n\nconst char BLACK[] = \"\\033[0;30m\";\nconst char RED[] = \"\\033[0;31m\";\nconst char GREEN[] = \"\\033[0;32m\";\nconst char YELLOW[] = \"\\033[0;33m\";\nconst char BLUE[] = \"\\033[0;34m\";\nconst char MAGENTA[] = \"\\033[0;35m\";\nconst char CYAN[] = \"\\033[0;36m\";\nconst char WHITE[] = \"\\033[0;37m\";\nconst char RESET[] = \"\\033[0m\";\n\nclass MPPILogger\n{\npublic:\n  MPPILogger() = default;\n  MPPILogger(const MPPILogger& other) = default;\n  MPPILogger(MPPILogger&& other) = default;\n  virtual ~MPPILogger() = default;\n  MPPILogger& operator=(const MPPILogger& other) = default;\n  MPPILogger& operator=(MPPILogger&& other) = default;\n\n  explicit MPPILogger(LOG_LEVEL level)\n  {\n    setLogLevel(level);\n  }\n\n  /**\n   * @brief Set the Log Level\n   *\n   * @param level\n   */\n  void setLogLevel(const LOG_LEVEL& level)\n  {\n    log_level_ = level;\n  }\n\n  /**\n   * @brief Set the Output Stream\n   *\n   * @param output file stream to write (stdout, stderr, nullptr, etc.)\n   */\n  void setOutputStream(std::FILE* const output)\n  {\n    output_stream_ = output;\n  }\n\n  /**\n   * @brief Get the Log Level object\n   *\n   * @return LOG_LEVEL\n   */\n  LOG_LEVEL getLogLevel() const\n  {\n    return log_level_;\n  }\n\n  /**\n   * @brief Get the Output Stream object\n   *\n   * @return std::FILE*\n   */\n  std::FILE* getOutputStream() const\n  {\n    return output_stream_;\n  }\n\n  /**\n   * @brief Log debug messages using virtual debug_impl() method\n   *\n   * @tparam ...Args  variadic template type of args used in the format string\n   * @param fmt       format string used in printf\n   * @param args      
extra args used by the format string fmt\n   */\n  template <typename... Args>\n  void debug(const char* fmt, Args const&... args)\n  {\n    std::string message = format_string(fmt, args...);\n    this->debug_impl(message);\n  }\n\n  /**\n   * @brief Log info messages using virtual info_impl() method\n   *\n   * @tparam ...Args  variadic template type of args used in the format string\n   * @param fmt       format string used in printf\n   * @param args      extra args used by the format string fmt\n   */\n  template <typename... Args>\n  void info(const char* fmt, Args const&... args)\n  {\n    std::string message = format_string(fmt, args...);\n    this->info_impl(message);\n  }\n\n  /**\n   * @brief Log warning messages using virtual warning_impl() method\n   *\n   * @tparam ...Args  variadic template type of args used in the format string\n   * @param fmt       format string used in printf\n   * @param args      extra args used by the format string fmt\n   */\n  template <typename... Args>\n  void warning(const char* fmt, Args const&... args)\n  {\n    std::string message = format_string(fmt, args...);\n    this->warning_impl(message);\n  }\n\n  /**\n   * @brief Log error messages using virtual error_impl() method\n   *\n   * @tparam ...Args  variadic template type of args used in the format string\n   * @param fmt       format string used in printf\n   * @param args      extra args used by the format string fmt\n   */\n  template <typename... Args>\n  void error(const char* fmt, Args const&... 
args)\n  {\n    std::string message = format_string(fmt, args...);\n    this->error_impl(message);\n  }\n\nprotected:\n  LOG_LEVEL log_level_ = GLOBAL_LOG_LEVEL;\n  std::FILE* output_stream_ = stdout;\n\n  virtual void debug_impl(const std::string& message)\n  {\n    if (log_level_ <= LOG_LEVEL::DEBUG)\n    {\n      surround_fprintf(output_stream_, GREEN, RESET, message);\n    }\n  }\n\n  virtual void info_impl(const std::string& message)\n  {\n    if (log_level_ <= LOG_LEVEL::INFO)\n    {\n      surround_fprintf(output_stream_, CYAN, RESET, message);\n    }\n  }\n\n  virtual void warning_impl(const std::string& message)\n  {\n    if (log_level_ <= LOG_LEVEL::WARNING)\n    {\n      surround_fprintf(output_stream_, YELLOW, RESET, message);\n    }\n  }\n\n  virtual void error_impl(const std::string& message)\n  {\n    if (log_level_ <= LOG_LEVEL::ERROR)\n    {\n      surround_fprintf(output_stream_, RED, RESET, message);\n    }\n  }\n\n  /**\n   * @brief Print message to stream with coloring defined by prefix\n   *\n   * @param fstream  where the message will be printed to\n   * @param prefix   prefix string to print before message. Expected to be a color code\n   * @param suffix   suffix string to print after message. Expected to be a color reset code\n   * @param message  actual message to be printed\n   */\n  virtual void surround_fprintf(std::FILE* fstream, const char* prefix, const char* suffix, const std::string& message)\n  {\n    std::fprintf(fstream, \"%s%s%s\", prefix, message.c_str(), suffix);\n  }\n\n  /**\n   * @brief create a string out of format string and variable number of additional arguments\n   *\n   * @tparam ...Args  variadic template type for extra arguments passed to format_string()\n   * @param fmt       format string defining how to display additional arguments\n   * @param args      additional arguments for formatting\n   *\n   * @return          std::string containing formatted text\n   */\n  template <typename... 
Args>\n  std::string format_string(const char* fmt, Args const&... args)\n  {\n    // figure out size of formatted string\n    std::vector<char> buf(1 + std::snprintf(nullptr, 0, fmt, args...));\n    // Fill buffer with formatted string\n    std::snprintf(buf.data(), buf.size(), fmt, args...);\n    return std::string(buf.data());\n  }\n};\n\nusing MPPILoggerPtr = std::shared_ptr<MPPILogger>;\n}  // namespace util\n}  // namespace mppi\n"
  },
  {
    "path": "include/mppi/utils/managed.cuh",
    "content": "/*\n * Software License Agreement (BSD License)\n * Copyright (c) 2013, Georgia Institute of Technology\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions are met:\n *\n * 1. Redistributions of source code must retain the above copyright notice, this\n * list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright notice,\n * this list of conditions and the following disclaimer in the documentation\n * and/or other materials provided with the distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n */\n/**********************************************\n * @file managed.cuh\n * @author Grady Williams <gradyrw@gmail.com>\n * @date May 24, 2017\n * @copyright 2017 Georgia Institute of Technology\n * @brief Class to be inherited by classes passed\n * to CUDA kernels. 
Helps unify stream management.\n ***********************************************/\n\n#ifndef MPPI_MANAGED_CUH_\n#define MPPI_MANAGED_CUH_\n\n#include \"gpu_err_chk.cuh\"\n#include <mppi/utils/logger.hpp>\n\n/**\n * @class Managed managed.cuh\n * @brief Class for setting the stream to be used by dynamics and cost functions used\n * by MPPIController.\n *\n * This class has one variable, which is the CUDA stream, and a function which sets the\n * stream. It is meant to be inherited by costs and dynamics classes which are passed into\n * the MPPIController kernels. In the past, this class used unified memory (hence the managed name),\n * so that classes could be passed by reference to CUDA kernels. However, as of right now,\n * the difficulties of using unified memory with multi-threaded CPU programs make getting\n * good performance with unified memory difficult, so this has been removed. Future implementations\n * may bring back the unified memory feature.\n */\nclass Managed\n{\npublic:\n  cudaStream_t stream_ = 0;  ///< The CUDA Stream that the class is bound too. 
0 is the default (NULL) stream.\n\n  // true when allocated\n  bool GPUMemStatus_ = false;\n\n  Managed(cudaStream_t stream = 0)\n  {\n    this->bindToStream(stream);\n    auto logger = std::make_shared<mppi::util::MPPILogger>();\n    setLogger(logger);\n  }\n\n  /**\n  @brief Sets the stream and synchronizes the device.\n  @param stream is the CUDA stream that the object is assigned too.\n  */\n  void bindToStream(cudaStream_t stream)\n  {\n    stream_ = stream;\n    cudaDeviceSynchronize();\n  }\n\n  // REQUIRED: basic interface, make sure to implement in each class\n  // GPUSetup - allocates the GPU object\n  // freeCudaMem -> deallocates what is setup in GPUSetup\n  // getParams, setParams -> gets and sets the parameters\n  // paramsToDevice -> copies the parameters over to the GPU side\n\n  // OPTIONAL:\n  // printParams\n  // other printing methods\n\n  __host__ void setLogger(const mppi::util::MPPILoggerPtr& logger)\n  {\n    logger_ = logger;\n  }\n\n  __host__ void setLogLevel(const mppi::util::LOG_LEVEL& level)\n  {\n    logger_->setLogLevel(level);\n  }\n\n  __host__ mppi::util::MPPILoggerPtr getLogger()\n  {\n    return logger_;\n  }\n\n  __host__ mppi::util::MPPILoggerPtr getLogger() const\n  {\n    return logger_;\n  }\n\n  __device__ __host__ int getGrdSharedSizeBytes() const\n  {\n    return SHARED_MEM_REQUEST_GRD_BYTES;\n  }\n  __device__ __host__ int getBlkSharedSizeBytes() const\n  {\n    return SHARED_MEM_REQUEST_BLK_BYTES;\n  }\n\n\nprotected:\n  template <class T>\n  static T* GPUSetup(T* host_ptr)\n  {\n    // Allocate enough space on the GPU for the object\n    T* device_ptr;\n    cudaMalloc((void**)&device_ptr, sizeof(T));\n    // Cudamemcpy\n    HANDLE_ERROR(cudaMemcpyAsync(device_ptr, host_ptr, sizeof(T), cudaMemcpyHostToDevice, host_ptr->stream_));\n    cudaDeviceSynchronize();\n    host_ptr->GPUMemStatus_ = true;\n    return device_ptr;\n  }\n  mppi::util::MPPILoggerPtr logger_ = nullptr;\n\n  int SHARED_MEM_REQUEST_GRD_BYTES = 0;  ///< 
Amount of shared memory we need per BLOCK.\n  int SHARED_MEM_REQUEST_BLK_BYTES = 0;  ///< Amount of shared memory we need per ROLLOUT.\n\n  // TODO CRTP template this on the base class for allocation and deallocation\n};\n\n#endif /* MPPI_MANAGED_CUH_*/\n"
  },
  {
    "path": "include/mppi/utils/math_utils.h",
    "content": "/*\n * Created on Mon Jun 01 2020 by Bogdan\n *\n */\n#ifndef MATH_UTILS_H_\n#define MATH_UTILS_H_\n\n// Needed for sampling without replacement\n#include <chrono>\n#include <type_traits>\n#include <unordered_set>\n#include <random>\n#include <vector>\n#include <algorithm>\n\n#include <cuda_runtime.h>\n#include <Eigen/Dense>\n\n#include <mppi/utils/angle_utils.cuh>\n#include <mppi/utils/matrix_mult_utils.cuh>\n#include <mppi/utils/risk_utils.cuh>\n\n#ifndef SQ\n#define SQ(a) ((a) * (a))\n#endif  // SQ\n\n// For aligning parameters within structs such as a float array to 16 bytes\n// Ex: float name[size] MPPI_ALIGN(16) = {0.0f};\n#if defined(__CUDACC__)  // NVCC\n#define MPPI_ALIGN(n) __align__(n)\n#elif defined(__GNUC__)  // GCC\n#define MPPI_ALIGN(n) __attribute__((aligned(n)))\n#elif defined(_MSC_VER)  // MSVC\n#define MPPI_ALIGN(n) __declspec(align(n))\n#else\n#error \"Please provide a definition for MPPI_ALIGN macro for your host compiler!\"\n#endif\n\nnamespace mppi\n{\nnamespace math\n{\nnamespace matrix = ::mppi::matrix_multiplication;\n\nconst float GRAVITY = 9.81f;\n// Based off of https://gormanalysis.com/blog/random-numbers-in-cpp\ninline std::vector<int> sample_without_replacement(const int k, const int N,\n                                                   std::default_random_engine g = std::default_random_engine())\n{\n  if (k > N)\n  {\n    throw std::logic_error(\"Can't sample more than n times without replacement\");\n  }\n  // Create an unordered set to store the samples\n  std::unordered_set<int> samples;\n\n  // For loop runs k times\n  for (int r = N - k; r < N; r++)\n  {\n    if (r == 0)\n    {\n      samples.insert(N - 1);\n      continue;\n    }\n    int v = std::uniform_int_distribution<>(1, r)(g);  // sample between 1 and r\n    if (!samples.insert(v - 1).second)\n    {  // if v exists in the set\n      samples.insert(r - 1);\n    }\n  }\n  // Copy set into a vector\n  std::vector<int> final_sequence(samples.begin(), 
samples.end());\n  // Shuffle the vector to get the final sequence of sampling\n  std::shuffle(final_sequence.begin(), final_sequence.end(), g);\n  return final_sequence;\n}\n\ninline __host__ __device__ float expr(const float r, const float x)\n{\n  float mid_term = 1.0f + (r - 1.0f) * x;\n  return (mid_term > 0) * powf(mid_term, 1.0f / (r - 1.0f));\n}\n\n/**\n * Linear interpolation\n * Given two coordinates (x_min, y_min) and (x_max, y_max)\n * And the x location of a third (x), return the y location\n * along the line between the two points\n */\ninline __host__ __device__ float linInterp(const float x, const float x_min, const float x_max, const float y_min,\n                                           const float y_max)\n{\n  return (x - x_min) / (x_max - x_min) * (y_max - y_min) + y_min;\n}\n\n/**\n * Return the sign of a variable (1 for positive, -1 for negative, 0 for 0)\n **/\ntemplate <class T = float>\ninline __host__ __device__ int sign(const T& a)\n{\n  return (a > 0) - (a < 0);\n}\n\ninline __host__ __device__ int int_ceil(const int& a, const int& b)\n{\n  return a == 0 ? a : (a - 1) / b + 1;\n}\n\ninline constexpr __host__ __device__ int int_ceil_const(const int& a, const int& b)\n{\n  return a == 0 ? a : (a - 1) / b + 1;\n}\n\n/**\n * @brief gives back the next multiple of b larger than or equal to a.\n * For example, if a = 3, b = 2, this method returns 4\n *\n * @param a - int to be larger than\n * @param b - int to be multiple of\n * @return int - the next multiple of b larger than a\n */\ninline constexpr __host__ __device__ int int_multiple_const(const int& a, const int& b)\n{\n  return a == 0 ? 
a : ((a - 1) / b + 1) * b;\n}\n\n// Returns the int version of ceil(a/4)\ninline __host__ __device__ int nearest_quotient_4(const int& a)\n{\n  return int_ceil(a, 4);\n}\n\n// Returns the next multiple of 4 larger than or equal to a, Useful for calculating aligned memory sizes\ninline __host__ __device__ int nearest_multiple_4(const int& a)\n{\n  return int_ceil(a, 4) * 4;\n}\n\n/**\n * Calculates the normalized distance from the centerline\n * @param r - current radius\n * @param r_in - the inside radius of a track\n * @param r_out - the outside radius of a track\n * @return norm_dist - a normalized distance away from the centerline\n * norm_dist = 0 -> on the centerline\n * norm_dist = 1 -> on one of the track boundaries inner, or outer\n */\ninline __host__ __device__ float normDistFromCenter(const float r, const float r_in, const float r_out)\n{\n  float r_center = (r_in + r_out) / 2.0f;\n  float r_width = (r_out - r_in);\n  float dist_from_center = fabsf(r - r_center);\n  float norm_dist = dist_from_center / (r_width * 0.5f);\n  return norm_dist;\n}\n\n/**\n * Multiply two quaternions together which gives you their rotations added together\n * q_3 = q_1 x q_2\n * Inputs:\n *  q_1 - first quaternion\n *  q_2 - second quaternion\n *  q_3 - output quaternion\n */\ninline __host__ __device__ void QuatMultiply(const float q_1[4], const float q_2[4], float q_3[4],\n                                             bool normalize = true)\n{\n  q_3[0] = q_1[0] * q_2[0] - q_1[1] * q_2[1] - q_1[2] * q_2[2] - q_1[3] * q_2[3];\n  q_3[1] = q_1[1] * q_2[0] + q_1[0] * q_2[1] - q_1[3] * q_2[2] + q_1[2] * q_2[3];\n  q_3[2] = q_1[2] * q_2[0] + q_1[3] * q_2[1] + q_1[0] * q_2[2] - q_1[1] * q_2[3];\n  q_3[3] = q_1[3] * q_2[0] - q_1[2] * q_2[1] + q_1[1] * q_2[2] + q_1[0] * q_2[3];\n  if (normalize)\n  {\n#ifdef __CUDA_ARCH__\n    float inv_norm = rsqrtf(SQ(q_3[0]) + SQ(q_3[1]) + SQ(q_3[2]) + SQ(q_3[3]));\n#else\n    float inv_norm = 1.0f / sqrtf(SQ(q_3[0]) + SQ(q_3[1]) + SQ(q_3[2]) + 
SQ(q_3[3]));\n#endif\n    __UNROLL(4)\n    for (int i = 0; i < 4; i++)\n    {\n      q_3[i] *= inv_norm;\n    }\n  }\n}\n\ninline __host__ __device__ void QuatInv(const float q[4], float q_inv[4])\n{\n#ifdef __CUDA_ARCH__\n  float inv_norm = rsqrtf(SQ(q[0]) + SQ(q[1]) + SQ(q[2]) + SQ(q[3]));\n#else\n  float inv_norm = 1.0f / sqrtf(SQ(q[0]) + SQ(q[1]) + SQ(q[2]) + SQ(q[3]));\n#endif\n  q_inv[0] = q[0] * inv_norm;\n  q_inv[1] = -q[1] * inv_norm;\n  q_inv[2] = -q[2] * inv_norm;\n  q_inv[3] = -q[3] * inv_norm;\n}\n\n/**\n * Calculate the rotation required to get from q_1 to q_2\n * In Euler angles, this would be direct subraction but\n * in quaternions, it doesn't quite work that way\n */\ninline __device__ void QuatSubtract(float q_1[4], float q_2[4], float q_3[4])\n{\n  float q_1_inv[4];\n  QuatInv(q_1, q_1_inv);\n  QuatMultiply(q_2, q_1_inv, q_3);\n}\n\n/*\n * The Euler rotation sequence is 3-2-1 (roll, pitch, yaw) from Body to World\n */\ninline __device__ void Euler2QuatNWU(const double& r, const double& p, const double& y, double q[4])\n{\n  double phi_2 = r / 2.0;\n  double theta_2 = p / 2.0;\n  double psi_2 = y / 2.0;\n  double cos_phi_2 = cos(phi_2);\n  double sin_phi_2 = sin(phi_2);\n  double cos_theta_2 = cos(theta_2);\n  double sin_theta_2 = sin(theta_2);\n  double cos_psi_2 = cos(psi_2);\n  double sin_psi_2 = sin(psi_2);\n\n  q[0] = cos_phi_2 * cos_theta_2 * cos_psi_2 + sin_phi_2 * sin_theta_2 * sin_psi_2;\n  q[1] = -cos_phi_2 * sin_theta_2 * sin_psi_2 + cos_theta_2 * cos_psi_2 * sin_phi_2;\n  q[2] = cos_phi_2 * cos_psi_2 * sin_theta_2 + sin_phi_2 * cos_theta_2 * sin_psi_2;\n  q[3] = cos_phi_2 * cos_theta_2 * sin_psi_2 - sin_phi_2 * cos_psi_2 * sin_theta_2;\n}\n\n/*\n * The Euler rotation sequence is 3-2-1 (roll, pitch, yaw) from Body to World\n */\ninline __host__ __device__ void Euler2QuatNWU(const float& r, const float& p, const float& y, float q[4])\n{\n  float sin_phi_2, cos_phi_2, sin_theta_2, cos_theta_2, sin_psi_2, cos_psi_2;\n#ifdef 
__CUDA_ARCH__\n  float r_norm = angle_utils::normalizeAngle(r * 0.5f);\n  float p_norm = angle_utils::normalizeAngle(p * 0.5f);\n  float y_norm = angle_utils::normalizeAngle(y * 0.5f);\n  __sincosf(r_norm, &sin_phi_2, &cos_phi_2);\n  __sincosf(p_norm, &sin_theta_2, &cos_theta_2);\n  __sincosf(y_norm, &sin_psi_2, &cos_psi_2);\n#else\n  sincosf(r * 0.5f, &sin_phi_2, &cos_phi_2);\n  sincosf(p * 0.5f, &sin_theta_2, &cos_theta_2);\n  sincosf(y * 0.5f, &sin_psi_2, &cos_psi_2);\n#endif\n\n  q[0] = cos_phi_2 * cos_theta_2 * cos_psi_2 + sin_phi_2 * sin_theta_2 * sin_psi_2;\n  q[1] = -cos_phi_2 * sin_theta_2 * sin_psi_2 + cos_theta_2 * cos_psi_2 * sin_phi_2;\n  q[2] = cos_phi_2 * cos_psi_2 * sin_theta_2 + sin_phi_2 * cos_theta_2 * sin_psi_2;\n  q[3] = cos_phi_2 * cos_theta_2 * sin_psi_2 - sin_phi_2 * cos_psi_2 * sin_theta_2;\n}\n\n// (RPY rotation sequence)\n/*\n * Returns an euler sequence 3-2-1 (roll pitch yaw) that when applied takes you from body to world\n */\ninline __host__ __device__ void Quat2EulerNWU(const float q[4], float& r, float& p, float& y)\n{\n  r = atan2f(2.0f * q[3] * q[2] + 2.0f * q[0] * q[1], q[0] * q[0] + q[3] * q[3] - q[2] * q[2] - q[1] * q[1]);\n  float temp = -2.0f * q[0] * q[2] + 2.0f * q[1] * q[3];\n  // Clamp value between -1 and 1 to prevent NaNs\n  p = -asinf(fmaxf(fminf(1.0f, temp), -1.0f));\n  y = atan2f(2.0f * q[2] * q[1] + 2.0f * q[3] * q[0], q[0] * q[0] + q[1] * q[1] - q[2] * q[2] - q[3] * q[3]);\n}\n\ninline __host__ __device__ void Quat2DCM(const float q[4], float M[3][3])\n{\n  M[0][0] = SQ(q[0]) + SQ(q[1]) - SQ(q[2]) - SQ(q[3]);\n  M[0][1] = 2 * (q[1] * q[2] - q[0] * q[3]);\n  M[0][2] = 2 * (q[1] * q[3] + q[0] * q[2]);\n  M[1][0] = 2 * (q[1] * q[2] + q[0] * q[3]);\n  M[1][1] = SQ(q[0]) - SQ(q[1]) + SQ(q[2]) - SQ(q[3]);\n  M[1][2] = 2 * (q[2] * q[3] - q[0] * q[1]);\n  M[2][0] = 2 * (q[1] * q[3] - q[0] * q[2]);\n  M[2][1] = 2 * (q[2] * q[3] + q[0] * q[1]);\n  M[2][2] = SQ(q[0]) - SQ(q[1]) - SQ(q[2]) + SQ(q[3]);\n}\n\ninline __host__ 
__device__ void QuatSubtract(const Eigen::Quaternionf& q_1, const Eigen::Quaternionf& q_2,\n                                             Eigen::Quaternionf& q_3)\n{\n  q_3 = q_2 * q_1.inverse();\n}\n\ninline __host__ __device__ void QuatMultiply(const Eigen::Quaternionf& q_1, const Eigen::Quaternionf& q_2,\n                                             Eigen::Quaternionf& q_3, bool normalize = true)\n{\n  q_3 = q_1 * q_2;\n  if (normalize)\n  {\n    q_3.normalize();\n  }\n}\n\ninline __host__ __device__ void QuatInv(const Eigen::Quaternionf& q, Eigen::Quaternionf& q_f)\n{\n  q_f = q.inverse();\n}\n\n/*\n * rotates a point by the given quaternion\n */\ninline __host__ __device__ void RotatePointByQuat(const float q[4], const float3& point, float3& output)\n{\n  // converts the point into a quaternion format\n  float pq[4] = { 0.0f, point.x, point.y, point.z };\n  float q_inv[4];\n  float temp[4];\n  QuatInv(q, q_inv);\n  QuatMultiply(q, pq, temp, false);\n  QuatMultiply(temp, q_inv, pq, false);\n  // converts the quaternion back into a point\n  output = make_float3(pq[1], pq[2], pq[3]);\n}\n\n/*\n * rotates a point by the given quaternion\n */\ninline __host__ __device__ void RotatePointByQuat(const Eigen::Quaternionf& q, const float3& point, float3& output)\n{\n  // converts the point into a quaternion format\n  Eigen::Quaternionf q_inv, temp, pq;\n  pq.w() = 0.0f;\n  pq.x() = point.x;\n  pq.y() = point.y;\n  pq.z() = point.z;\n  QuatInv(q, q_inv);\n  QuatMultiply(q, pq, temp, false);\n  QuatMultiply(temp, q_inv, pq, false);\n  // converts the quaternion back into a point\n  output = make_float3(pq.x(), pq.y(), pq.z());\n}\n/*\n * rotates a point by the given quaternion\n */\ninline __host__ __device__ void RotatePointByQuat(const Eigen::Quaternionf& q, const Eigen::Ref<Eigen::Vector3f>& point,\n                                                  Eigen::Ref<Eigen::Vector3f> output)\n{\n  output = q * point;\n}\n\n/*\n * rotates a point by the given quaternion\n 
*/\ninline __host__ __device__ void RotatePointByQuat(const float q[4], float3& point)\n{\n  RotatePointByQuat(q, point, point);\n}\n\n/*\n * rotates a point by the given quaternion\n */\ninline __host__ __device__ void RotatePointByQuat(const Eigen::Quaternionf& q, float3& point)\n{\n  RotatePointByQuat(q, point, point);\n}\n\n/*\n * rotates a point by the given quaternion\n */\ninline __host__ __device__ void RotatePointByQuat(const Eigen::Quaternionf& q, Eigen::Ref<Eigen::Vector3f> point)\n{\n  RotatePointByQuat(q, point, point);\n}\n\n/*\n * rotates a point by the given DCM Matrix. Transpose is used as M stored in row-major\n */\ninline __host__ __device__ void RotatePointByDCM(const float M[3][3], const float3& point, float3& output,\n                                                 matrix::MAT_OP operation = matrix::MAT_OP::NONE)\n{\n  if (operation == matrix::MAT_OP::NONE)\n  {\n    matrix::gemm1<3, 3, 1, p1::Parallel1Dir::NONE>((float*)M, (const float*)&point, (float*)&output, 1.0f, 0.0f,\n                                                   matrix::MAT_OP::TRANSPOSE);\n  }\n  else if (operation == matrix::MAT_OP::TRANSPOSE)\n  {\n    matrix::gemm1<3, 3, 1, p1::Parallel1Dir::NONE>((float*)M, (const float*)&point, (float*)&output, 1.0f, 0.0f,\n                                                   matrix::MAT_OP::NONE);\n  }\n}\n\n/*\n * rotates a point by the given DCM Matrix\n */\ninline __host__ __device__ void RotatePointByDCM(const Eigen::Ref<Eigen::Matrix3f>& M, const float3& point,\n                                                 float3& output, matrix::MAT_OP operation = matrix::MAT_OP::NONE)\n{\n  Eigen::Map<Eigen::Vector3f> point_eigen((float*)&point);\n  Eigen::Map<Eigen::Vector3f> output_eigen((float*)&output);\n  if (operation == matrix::MAT_OP::NONE)\n  {\n    output_eigen = M * point_eigen;\n  }\n  else if (operation == matrix::MAT_OP::TRANSPOSE)\n  {\n    output_eigen = M.transpose() * point_eigen;\n  }\n}\n\n/*\n * rotates a point by the given 
DCM Matrix\n */\ninline __host__ __device__ void RotatePointByDCM(const Eigen::Ref<Eigen::Matrix3f>& M,\n                                                 const Eigen::Ref<Eigen::Vector3f>& point,\n                                                 Eigen::Ref<Eigen::Vector3f> output,\n                                                 matrix::MAT_OP operation = matrix::MAT_OP::NONE)\n{\n  if (operation == matrix::MAT_OP::NONE)\n  {\n    output = M * point;\n  }\n  else if (operation == matrix::MAT_OP::TRANSPOSE)\n  {\n    output = M.transpose() * point;\n  }\n}\n\n/*\n * The Euler rotation sequence is 3-2-1 (roll, pitch, yaw) from Body to World\n */\ninline __host__ __device__ void Euler2QuatNWU(const float& r, const float& p, const float& y, Eigen::Quaternionf& q)\n{\n  // double psi = clamp_radians(euler.roll);\n  // double theta = clamp_radians(euler.pitch);\n  // double phi = clamp_radians(euler.yaw);\n  float sin_phi_2, cos_phi_2, sin_theta_2, cos_theta_2, sin_psi_2, cos_psi_2;\n#ifdef __CUDA_ARCH__\n  float r_norm = angle_utils::normalizeAngle(r * 0.5f);\n  float p_norm = angle_utils::normalizeAngle(p * 0.5f);\n  float y_norm = angle_utils::normalizeAngle(y * 0.5f);\n  __sincosf(r_norm, &sin_phi_2, &cos_phi_2);\n  __sincosf(p_norm, &sin_theta_2, &cos_theta_2);\n  __sincosf(y_norm, &sin_psi_2, &cos_psi_2);\n#else\n  sincosf(r * 0.5f, &sin_phi_2, &cos_phi_2);\n  sincosf(p * 0.5f, &sin_theta_2, &cos_theta_2);\n  sincosf(y * 0.5f, &sin_psi_2, &cos_psi_2);\n#endif\n\n  q.w() = cos_phi_2 * cos_theta_2 * cos_psi_2 + sin_phi_2 * sin_theta_2 * sin_psi_2;\n  q.x() = -cos_phi_2 * sin_theta_2 * sin_psi_2 + cos_theta_2 * cos_psi_2 * sin_phi_2;\n  q.y() = cos_phi_2 * cos_psi_2 * sin_theta_2 + sin_phi_2 * cos_theta_2 * sin_psi_2;\n  q.z() = cos_phi_2 * cos_theta_2 * sin_psi_2 - sin_phi_2 * cos_psi_2 * sin_theta_2;\n}\n\n/*\n * The Euler rotation sequence is 3-2-1 (roll, pitch, yaw) from Body to World\n */\ninline __host__ __device__ void Euler2DCM_NWU(const float& r, const 
float& p, const float& y, float M[3][3])\n{\n  float sin_phi, cos_phi, sin_theta, cos_theta, sin_psi, cos_psi;\n#ifdef __CUDA_ARCH__\n  float r_norm = angle_utils::normalizeAngle(r);\n  float p_norm = angle_utils::normalizeAngle(p);\n  float y_norm = angle_utils::normalizeAngle(y);\n  __sincosf(r_norm, &sin_phi, &cos_phi);\n  __sincosf(p_norm, &sin_theta, &cos_theta);\n  __sincosf(y_norm, &sin_psi, &cos_psi);\n#else\n  sincosf(r, &sin_phi, &cos_phi);\n  sincosf(p, &sin_theta, &cos_theta);\n  sincosf(y, &sin_psi, &cos_psi);\n#endif\n\n  M[0][0] = cos_theta * cos_psi;\n  M[0][1] = sin_phi * sin_theta * cos_psi - cos_phi * sin_psi;\n  M[0][2] = cos_phi * sin_theta * cos_psi + sin_phi * sin_psi;\n  M[1][0] = cos_theta * sin_psi;\n  M[1][1] = sin_phi * sin_theta * sin_psi + cos_phi * cos_psi;\n  M[1][2] = cos_phi * sin_theta * sin_psi - sin_phi * cos_psi;\n  M[2][0] = -sin_theta;\n  M[2][1] = sin_phi * cos_theta;\n  M[2][2] = cos_phi * cos_theta;\n}\n\n/*\n * The Euler rotation sequence is 3-2-1 (roll, pitch, yaw) from Body to World\n */\ninline __host__ __device__ void Euler2DCM_NWU(const float& r, const float& p, const float& y,\n                                              Eigen::Ref<Eigen::Matrix3f> M)\n{\n  float sin_phi, cos_phi, sin_theta, cos_theta, sin_psi, cos_psi;\n#ifdef __CUDA_ARCH__\n  float r_norm = angle_utils::normalizeAngle(r);\n  float p_norm = angle_utils::normalizeAngle(p);\n  float y_norm = angle_utils::normalizeAngle(y);\n  __sincosf(r_norm, &sin_phi, &cos_phi);\n  __sincosf(p_norm, &sin_theta, &cos_theta);\n  __sincosf(y_norm, &sin_psi, &cos_psi);\n#else\n  sincosf(r, &sin_phi, &cos_phi);\n  sincosf(p, &sin_theta, &cos_theta);\n  sincosf(y, &sin_psi, &cos_psi);\n#endif\n\n  M(0, 0) = cos_theta * cos_psi;\n  M(0, 1) = sin_phi * sin_theta * cos_psi - cos_phi * sin_psi;\n  M(0, 2) = cos_phi * sin_theta * cos_psi + sin_phi * sin_psi;\n  M(1, 0) = cos_theta * sin_psi;\n  M(1, 1) = sin_phi * sin_theta * sin_psi + cos_phi * cos_psi;\n  M(1, 2) = 
cos_phi * sin_theta * sin_psi - sin_phi * cos_psi;\n  M(2, 0) = -sin_theta;\n  M(2, 1) = sin_phi * cos_theta;\n  M(2, 2) = cos_phi * cos_theta;\n}\n\n// (RPY rotation sequence)\n/*\n * Returns an euler sequence 3-2-1 (roll pitch yaw) that when applied takes you from body to world\n */\ninline void __host__ __device__ Quat2EulerNWU(const Eigen::Quaternionf& q, float& r, float& p, float& y)\n{\n  r = atan2f(2.0f * q.z() * q.y() + 2.0f * q.w() * q.x(),\n             q.w() * q.w() + q.z() * q.z() - q.y() * q.y() - q.x() * q.x());\n  float temp = -2.0f * q.w() * q.y() + 2.0f * q.x() * q.z();\n  p = -asinf(fmaxf(-1.0f, fminf(temp, 1.0f)));\n  y = atan2f(2.0f * q.y() * q.x() + 2.0f * q.z() * q.w(),\n             q.w() * q.w() + q.x() * q.x() - q.y() * q.y() - q.z() * q.z());\n}\n\ninline void Quat2DCM(const Eigen::Quaternionf& q, Eigen::Ref<Eigen::Matrix3f> DCM)\n{\n  DCM = q.toRotationMatrix();\n}\n\ninline __device__ void omega2edot(const float p, const float q, const float r, const float e[4], float ed[4])\n{\n  ed[0] = 0.5f * (-p * e[1] - q * e[2] - r * e[3]);\n  ed[1] = 0.5f * (p * e[0] - q * e[3] + r * e[2]);\n  ed[2] = 0.5f * (p * e[3] + q * e[0] - r * e[1]);\n  ed[3] = 0.5f * (-p * e[2] + q * e[1] + r * e[0]);\n}\n\n// Can't use Eigen::Ref on Quaternions\ninline void omega2edot(const float p, const float q, const float r, const Eigen::Quaternionf& e, Eigen::Quaternionf& ed)\n{\n  ed.w() = 0.5f * (-p * e.x() - q * e.y() - r * e.z());\n  ed.x() = 0.5f * (p * e.w() - q * e.z() + r * e.y());\n  ed.y() = 0.5f * (p * e.z() + q * e.w() - r * e.x());\n  ed.z() = 0.5f * (-p * e.y() + q * e.x() + r * e.w());\n}\n\ninline __host__ __device__ void bodyOffsetToWorldPoseQuat(const float3& offset, const float3& body_pose,\n                                                          const float q[4], float3& output)\n{\n  // rotate body vector into world frame\n  float3 rotated_offset = make_float3(offset.x, offset.y, offset.z);\n  RotatePointByQuat(q, rotated_offset);\n  // add 
offset to body pose\n  output.x = body_pose.x + rotated_offset.x;\n  output.y = body_pose.y + rotated_offset.y;\n  output.z = body_pose.z + rotated_offset.z;\n}\n\ninline __host__ __device__ void bodyOffsetToWorldPoseQuat(const float3& offset, const float3& body_pose,\n                                                          const Eigen::Quaternionf& q, float3& output)\n{\n  // rotate body vector into world frame\n  float3 rotated_offset = make_float3(offset.x, offset.y, offset.z);\n  RotatePointByQuat(q, rotated_offset);\n  // add offset to body pose\n  output.x = body_pose.x + rotated_offset.x;\n  output.y = body_pose.y + rotated_offset.y;\n  output.z = body_pose.z + rotated_offset.z;\n}\n\ninline __host__ __device__ void bodyOffsetToWorldPoseQuat(const Eigen::Ref<Eigen::Vector3f>& offset,\n                                                          const Eigen::Ref<Eigen::Vector3f>& body_pose,\n                                                          const Eigen::Quaternionf& q,\n                                                          Eigen::Ref<Eigen::Vector3f> output)\n{\n  RotatePointByQuat(q, offset, output);\n  // add offset to body pose\n  output += body_pose;\n}\n\ninline __host__ __device__ void bodyOffsetToWorldPoseEuler(const float3& offset, const float3& body_pose,\n                                                           const float3& rotation, float3& output)\n{\n  // convert RPY to Rotation Matrix\n  float M[3][3];\n  math::Euler2DCM_NWU(rotation.x, rotation.y, rotation.z, M);\n  RotatePointByDCM(M, offset, output);\n\n  // add offset to body pose\n  output.x += body_pose.x;\n  output.y += body_pose.y;\n  output.z += body_pose.z;\n}\n\ninline __host__ __device__ void bodyOffsetToWorldPoseEuler(const float3& offset, const float3& body_pose,\n                                                           const Eigen::Ref<Eigen::Vector3f>& rotation, float3& output)\n{\n  // convert RPY to Rotation Matrix\n  float M[3][3];\n  
math::Euler2DCM_NWU(rotation.x(), rotation.y(), rotation.z(), M);\n  RotatePointByDCM(M, offset, output);\n\n  // add offset to body pose\n  output.x += body_pose.x;\n  output.y += body_pose.y;\n  output.z += body_pose.z;\n}\n\ninline __host__ __device__ void bodyOffsetToWorldPoseEuler(const Eigen::Ref<Eigen::Vector3f>& offset,\n                                                           const Eigen::Ref<Eigen::Vector3f>& body_pose,\n                                                           const Eigen::Ref<Eigen::Vector3f>& rotation,\n                                                           Eigen::Ref<Eigen::Vector3f> output)\n{\n  // convert RPY to Rotation Matrix\n  Eigen::Matrix3f M;\n  math::Euler2DCM_NWU(rotation.x(), rotation.y(), rotation.z(), M);\n  RotatePointByDCM(M, offset, output);\n  // add offset to body pose\n  output += body_pose;\n}\n\ninline __host__ __device__ void bodyOffsetToWorldPoseDCM(const float3& offset, const float3& body_pose,\n                                                         const float rotation[3][3], float3& output)\n{\n  RotatePointByDCM(rotation, offset, output);\n\n  // add offset to body pose\n  output.x += body_pose.x;\n  output.y += body_pose.y;\n  output.z += body_pose.z;\n}\n\ninline __host__ __device__ void bodyOffsetToWorldPoseDCM(const float3& offset, const float3& body_pose,\n                                                         const Eigen::Ref<Eigen::Matrix3f>& rotation, float3& output)\n{\n  RotatePointByDCM(rotation, offset, output);\n\n  // add offset to body pose\n  output.x += body_pose.x;\n  output.y += body_pose.y;\n  output.z += body_pose.z;\n}\n\ninline __host__ __device__ void bodyOffsetToWorldPoseDCM(const Eigen::Ref<Eigen::Vector3f>& offset,\n                                                         const Eigen::Ref<Eigen::Vector3f>& body_pose,\n                                                         const Eigen::Ref<Eigen::Matrix3f>& rotation,\n                                                   
      Eigen::Ref<Eigen::Vector3f> output)\n{\n  RotatePointByDCM(rotation, offset, output);\n\n  // add offset to body pose\n  output += body_pose;\n}\n\ninline __device__ __host__ Eigen::Matrix3f skewSymmetricMatrix(Eigen::Vector3f& v)\n{\n  Eigen::Matrix3f m;\n  m << 0.0f, -v[2], v[1], v[2], 0.0f, -v[0], -v[1], v[0], 0.0f;\n  return m;\n}\n\ninline __host__ double timeDiffms(const std::chrono::steady_clock::time_point& end,\n                                  const std::chrono::steady_clock::time_point& start)\n{\n  return (end - start).count() / 1e6;\n}\n\ninline __host__ __device__ double normalCDF(double x)\n{\n  return 0.5 * erfc(-x * M_SQRT1_2);\n}\n\ninline __host__ std::vector<double> calculateCk(size_t steps)\n{\n  // calculate params only when more steps are required\n  static std::vector<double> c_vec;\n  if (c_vec.size() < steps)\n  {\n    c_vec.resize(steps, 0);\n    c_vec[0] = 1.0;\n    for (size_t k = 1; k <= steps; k++)\n    {\n      double c_k = 0;\n      for (size_t m = 0; m < k; m++)\n      {\n        c_k += c_vec[m] * c_vec[k - 1 - m] / ((m + 1.0) * (2.0 * m + 1.0));\n      }\n      c_vec[k] = c_k;\n    }\n  }\n  return c_vec;\n}\n\n/**\n * Implementation based on\n * https://en.wikipedia.org/wiki/Error_function#Inverse_functions and Horner's\n * method\n */\ninline __host__ double inverseErrorFunc(double x, int num_precision = 5)\n{\n  std::vector<double> c_k = calculateCk(num_precision);\n  double output = 0;\n  for (int i = num_precision; i > 0; i--)\n  {\n    output = (c_k[i] / (2.0 * i + 1.0) + output) * x * x * M_PI / 4.0;\n  }\n  output = (output + c_k[0]) * x / M_2_SQRTPI;\n  return output;\n}\n\ninline __host__ double inverseErrorFuncSlow(double x, int num_precision = 5)\n{\n  std::vector<double> c_k = calculateCk(num_precision);\n  double slow_output = 0;\n  for (int i = 0; i <= num_precision; i++)\n  {\n    slow_output += c_k[i] / (2.0 * i + 1.0) * std::pow(x / M_2_SQRTPI, 2 * i + 1);\n  }\n  return slow_output;\n}\n\n/**\n * 
https://en.wikipedia.org/wiki/Normal_distribution#Quantile_function\n */\ninline __host__ double inverseNormalCDF(double x, int num_precision = 10)\n{\n  return M_SQRT2 * inverseErrorFunc(2.0 * x - 1.0, num_precision);\n}\n\ninline __host__ double inverseNormalCDFSlow(double x, int num_precision = 10)\n{\n  return M_SQRT2 * inverseErrorFuncSlow(2.0 * x - 1.0, num_precision);\n}\n\ninline __device__ __host__ float clamp(float value, float min, float max)\n{\n  return fminf(fmaxf(value, min), max);\n}\n\ninline __device__ __host__ float sign(float value)\n{\n  return value >= 0 ? 1 : -1;\n}\n\n}  // namespace math\n\n}  // namespace mppi\n\n#endif  // MATH_UTILS_H_\n"
  },
  {
    "path": "include/mppi/utils/matrix_mult_utils.cuh",
    "content": "//\n// Created by Bogdan on 8/20/23\n//\n#pragma once\n#include <mppi/utils/parallel_utils.cuh>\n\nnamespace mppi\n{\nnamespace matrix_multiplication\n{\n/**\n * Utility Functions\n **/\ninline __host__ __device__ int2 const unravelColumnMajor(const int index, const int num_rows)\n{\n  int col = index / num_rows;\n  int row = index % num_rows;\n  return make_int2(row, col);\n}\n\ninline __host__ __device__ int2 const unravelRowMajor(const int index, const int num_cols)\n{\n  int row = index / num_cols;\n  int col = index % num_cols;\n  return make_int2(row, col);\n}\ninline __host__ __device__ constexpr int columnMajorIndex(const int row, const int col, const int num_rows)\n{\n  return col * num_rows + row;\n}\n\ninline __host__ __device__ constexpr int rowMajorIndex(const int row, const int col, const int num_cols)\n{\n  return row * num_cols + col;\n}\n\n/**\n * Utility Classes\n **/\nenum class MAT_OP : int\n{\n  NONE = 0,\n  TRANSPOSE\n};\n\ntemplate <int M, int N, class T = float>\nclass devMatrix\n{\npublic:\n  T* data = nullptr;\n  static constexpr int rows = M;\n  static constexpr int cols = N;\n  devMatrix(T* n_data)\n  {\n    data = n_data;\n  };\n\n  T operator()(const int i, const int j) const\n  {\n    return data[columnMajorIndex(i, j, rows)];\n  }\n};\n\n/**\n * @brief GEneral Matrix Multiplication\n * Conducts the operation\n * C = alpha * op(A) * op(B) + beta * C\n * on matrices of type T\n * TODO: Add transpose options like cuBLAS GEMM\n * Inputs:\n * op(A) - T-type column-major matrix of size M * K, stored in shared/global mem\n * op(B) - T-type column-major matrix of size K * N, stored in shared/global mem\n * alpha - T-type to multiply A * B\n * beta - T-type multipling C\n * A_OP - whether or not you should use A or A transpose\n * B_OP - whether or not you should use B or B transpose\n * Outputs:\n * C - float column-major matrix of size M * N, stored in shared/global mem\n *\n */\ntemplate <int M, int K, int N, 
p1::Parallel1Dir P_DIR = p1::Parallel1Dir::THREAD_Y, class T = float>\ninline __device__ __host__ void gemm1(const T* A, const T* B, T* C, const T alpha = 1, const T beta = 0,\n                                      const MAT_OP A_OP = MAT_OP::NONE, const MAT_OP B_OP = MAT_OP::NONE)\n{\n  int parallel_index;\n  int parallel_step;\n  int p, k;\n  p1::getParallel1DIndex<P_DIR>(parallel_index, parallel_step);\n  int2 mn;\n  const bool all_stride = (A_OP == MAT_OP::NONE) && (B_OP == MAT_OP::TRANSPOSE);\n  for (p = parallel_index; p < M * N; p += parallel_step)\n  {\n    T accumulator = 0;\n    mn = unravelColumnMajor(p, M);\n    if (K % 4 == 0 && sizeof(type4<T>) <= 16 && !all_stride)\n    {  // Fetch 4 B values using single load memory operator of up to 128 bits since B is contiguous wrt k\n      __UNROLL(10)\n      for (k = 0; k < K; k += 4)\n      {\n        if (A_OP == MAT_OP::NONE && B_OP == MAT_OP::NONE)\n        {\n          const type4<T> b_tmp = reinterpret_cast<const type4<T>*>(&B[columnMajorIndex(k, mn.y, K)])[0];\n          accumulator += A[columnMajorIndex(mn.x, k + 0, M)] * b_tmp[0];\n          accumulator += A[columnMajorIndex(mn.x, k + 1, M)] * b_tmp[1];\n          accumulator += A[columnMajorIndex(mn.x, k + 2, M)] * b_tmp[2];\n          accumulator += A[columnMajorIndex(mn.x, k + 3, M)] * b_tmp[3];\n        }\n        else if (A_OP == MAT_OP::TRANSPOSE && B_OP == MAT_OP::NONE)\n        {\n          const type4<T> b_tmp = reinterpret_cast<const type4<T>*>(&B[columnMajorIndex(k, mn.y, K)])[0];\n          const type4<T> a_tmp = reinterpret_cast<const type4<T>*>(&A[rowMajorIndex(mn.x, k, K)])[0];\n          accumulator += a_tmp[0] * b_tmp[0];\n          accumulator += a_tmp[1] * b_tmp[1];\n          accumulator += a_tmp[2] * b_tmp[2];\n          accumulator += a_tmp[3] * b_tmp[3];\n        }\n        else if (A_OP == MAT_OP::TRANSPOSE && B_OP == MAT_OP::TRANSPOSE)\n        {\n          // const type4<T> b_tmp = reinterpret_cast<const 
type4<T>*>(&B[columnMajorIndex(k, mn.y, K)])[0];\n          const type4<T> a_tmp = reinterpret_cast<const type4<T>*>(&A[rowMajorIndex(mn.x, k, K)])[0];\n          accumulator += a_tmp[0] * B[rowMajorIndex(k + 0, mn.y, N)];\n          accumulator += a_tmp[1] * B[rowMajorIndex(k + 1, mn.y, N)];\n          accumulator += a_tmp[2] * B[rowMajorIndex(k + 2, mn.y, N)];\n          accumulator += a_tmp[3] * B[rowMajorIndex(k + 3, mn.y, N)];\n        }\n      }\n    }\n    else if (K % 2 == 0 && sizeof(type2<T>) <= 16 && !all_stride)\n    {  // Fetch 2 B values using single load memory operator of up to 128 bits since B is contiguous wrt k\n      __UNROLL(10)\n      for (k = 0; k < K; k += 2)\n      {\n        if (A_OP == MAT_OP::NONE && B_OP == MAT_OP::NONE)\n        {\n          const type2<T> b_tmp = reinterpret_cast<const type2<T>*>(&B[columnMajorIndex(k, mn.y, K)])[0];\n          accumulator += A[columnMajorIndex(mn.x, k + 0, M)] * b_tmp[0];\n          accumulator += A[columnMajorIndex(mn.x, k + 1, M)] * b_tmp[1];\n        }\n        else if (A_OP == MAT_OP::TRANSPOSE && B_OP == MAT_OP::NONE)\n        {\n          const type2<T> b_tmp = reinterpret_cast<const type2<T>*>(&B[columnMajorIndex(k, mn.y, K)])[0];\n          const type2<T> a_tmp = reinterpret_cast<const type2<T>*>(&A[rowMajorIndex(mn.x, k, K)])[0];\n          accumulator += a_tmp[0] * b_tmp[0];\n          accumulator += a_tmp[1] * b_tmp[1];\n        }\n        else if (A_OP == MAT_OP::TRANSPOSE && B_OP == MAT_OP::TRANSPOSE)\n        {\n          const type2<T> a_tmp = reinterpret_cast<const type2<T>*>(&A[rowMajorIndex(mn.x, k, K)])[0];\n          accumulator += a_tmp[0] * B[rowMajorIndex(k + 0, mn.y, N)];\n          accumulator += a_tmp[1] * B[rowMajorIndex(k + 1, mn.y, N)];\n        }\n      }\n    }\n    else\n    {  // Either K is odd or sizeof(T) is large enough that\n      T a;\n      T b;\n      __UNROLL(10)\n      for (k = 0; k < K; k++)\n      {\n        if (A_OP == MAT_OP::NONE && B_OP == 
MAT_OP::NONE)\n        {\n          a = A[columnMajorIndex(mn.x, k, M)];\n          b = B[columnMajorIndex(k, mn.y, K)];\n        }\n        else if (A_OP == MAT_OP::TRANSPOSE && B_OP == MAT_OP::NONE)\n        {\n          a = A[rowMajorIndex(mn.x, k, K)];\n          b = B[columnMajorIndex(k, mn.y, K)];\n        }\n        else if (A_OP == MAT_OP::TRANSPOSE && B_OP == MAT_OP::TRANSPOSE)\n        {\n          a = A[rowMajorIndex(mn.x, k, K)];\n          b = B[rowMajorIndex(k, mn.y, N)];\n        }\n        else\n        {\n          a = A[columnMajorIndex(mn.x, k, M)];\n          b = B[rowMajorIndex(k, mn.y, N)];\n        }\n\n        accumulator += a * b;\n      }\n    }\n    if (beta == 0)\n    {  // Special case to remove extraneous memory accesses\n      C[p] = alpha * accumulator;\n    }\n    else\n    {\n      C[p] = alpha * accumulator + beta * C[p];\n    }\n  }\n}\n\n/**\n * @brief GEneral Matrix Multiplication\n * Conducts the operation\n * C = alpha * A * B + beta * C\n * using two parallelization directions\n * TODO: Add transpose options like cuBLAS GEMM\n * Inputs:\n * A - float column-major matrix of size M * K, stored in shared/global mem\n * B - float column-major matrix of size K * N, stored in shared/global mem\n * alpha - float to multiply A * B\n * beta - float multipling C\n * Outputs:\n * C - float column-major matrix of size M * N, stored in shared/global mem\n *\n */\ntemplate <int M, int K, int N, p2::Parallel2Dir P_DIR = p2::Parallel2Dir::THREAD_XY>\ninline __device__ void gemm2(const float* A, const float* B, float* C, const float alpha = 1.0f,\n                             const float beta = 0.0f)\n{\n  int m_ind_start;\n  int m_ind_size;\n  int n_ind_start;\n  int n_ind_size;\n  p2::getParallel2DIndex<P_DIR>(m_ind_start, n_ind_start, m_ind_size, n_ind_size);\n  for (int m = m_ind_start; m < M; m += m_ind_size)\n  {\n    for (int n = n_ind_start; n < N; n += n_ind_size)\n    {\n      float accumulator = 0;\n      __UNROLL(10)\n      for 
(int k = 0; k < K; k++)\n      {\n        accumulator += A[columnMajorIndex(m, k, M)] * B[columnMajorIndex(k, n, K)];\n      }\n      C[columnMajorIndex(m, n, M)] = alpha * accumulator + beta * C[columnMajorIndex(m, n, M)];\n    }\n  }\n}\n\n/**\n * @brief Perform Guass Jordan Elimination in place on columan-major MxN matrix A.\n * Useful for solving Cx = b where A = [C | b] as well as inverting matrices.\n *\n * @tparam M - number of rows\n * @tparam N - number of cols\n * @tparam P_DIR - Parallelization axes for on the GPU\n * @tparam T - type of data in A\n * @param A - column-major matrix of type T with M rows and N cols\n *\n * @return reduced row echelon form of A is returned in A.\n */\ntemplate <int M, int N, p1::Parallel1Dir P_DIR = p1::Parallel1Dir::THREAD_Y, class T = float>\ninline __host__ __device__ void GaussJordanElimination(T* A)\n{\n  int p_index, step;\n  p1::getParallel1DIndex<P_DIR>(p_index, step);\n  int row, col, offset = 0;\n  T accumulator;\n  for (int i = 0; i < M; i++)\n  {\n    // Check if row-swap is needed\n    row = i;\n    while (A[columnMajorIndex(row, i + offset, M)] == 0)\n    {\n      row++;\n      if (row == M)\n      {  // column is all zeros, need to move on to the next column\n        offset++;\n        if (i + offset >= N)\n        {  // Ran out of columns to check.\n          return;\n        }\n        row = i;\n      }\n    }\n    // swap rows if needed (row != i)\n    for (col = i + p_index; row != i && col < N; col += step)\n    {\n      accumulator = A[columnMajorIndex(row, col, M)];\n      A[columnMajorIndex(row, col, M)] = A[columnMajorIndex(i, col, M)];\n      A[columnMajorIndex(i, col, M)] = accumulator;\n    }\n    // normalize the current row\n    accumulator = 1.0f / A[columnMajorIndex(i, i + offset, M)];\n    for (col = i + offset + p_index; col < N; col += step)\n    {\n      A[columnMajorIndex(i, col, M)] *= accumulator;\n    }\n#ifdef __CUDA_ARCH__\n    __syncthreads();\n#endif\n    // Now eliminate pivot 
from both rows above and below\n    for (row = p_index; row < M; row += step)\n    {\n      if (row == i)\n      {\n        continue;\n      }\n      accumulator = -A[columnMajorIndex(row, i + offset, M)];\n      for (col = i + offset; col < N; col++)\n      {\n        A[columnMajorIndex(row, col, M)] += accumulator * A[columnMajorIndex(i, col, M)];\n      }\n    }\n#ifdef __CUDA_ARCH__\n    __syncthreads();\n#endif\n  }\n}\n\ntemplate <p1::Parallel1Dir P_DIR = p1::Parallel1Dir::NONE, int M = 1, int K = 1, int N = 1, class T = float>\nvoid matMult1(const devMatrix<M, K, T>& A, const devMatrix<K, N, T>& B, devMatrix<M, N, T>& C)\n{\n  gemm1<M, K, N, P_DIR, T>(A.data, B.data, C.data);\n}\n\n}  // namespace matrix_multiplication\n}  // namespace mppi\n"
  },
  {
    "path": "include/mppi/utils/nn_helpers/fnn_helper.cu",
    "content": "//\n// Created by jason on 8/18/22.\n//\n\n#include \"fnn_helper.cuh\"\ntemplate <bool USE_SHARED>\nFNNHelper<USE_SHARED>::FNNHelper(const std::vector<int>& layers, cudaStream_t stream) : Managed(stream)\n{\n  setupMemory(layers);\n}\n\ntemplate <bool USE_SHARED>\nFNNHelper<USE_SHARED>::FNNHelper(std::string model_path, cudaStream_t stream) : Managed(stream)\n{\n  loadParams(model_path);\n}\n\ntemplate <bool USE_SHARED>\nFNNHelper<USE_SHARED>::FNNHelper(const cnpy::npz_t& param_dict, cudaStream_t stream) : Managed(stream)\n{\n  loadParams(param_dict);\n}\n\ntemplate <bool USE_SHARED>\nFNNHelper<USE_SHARED>::FNNHelper(const cnpy::npz_t& param_dict, std::string prefix, cudaStream_t stream)\n  : Managed(stream)\n{\n  loadParams(prefix, param_dict);\n}\n\ntemplate <bool USE_SHARED>\nFNNHelper<USE_SHARED>::~FNNHelper()\n{\n  if (this->GPUMemStatus_)\n  {\n    freeCudaMem();\n  }\n  delete theta_;\n  delete stride_idcs_;\n  delete net_structure_;\n}\n\ntemplate <bool USE_SHARED>\nvoid FNNHelper<USE_SHARED>::loadParams(const std::string& model_path)\n{\n  int i, j, k;\n  std::string bias_name = \"\";\n  std::string weight_name = \"\";\n  if (!fileExists(model_path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << model_path.c_str();\n    exit(-1);\n  }\n  cnpy::npz_t param_dict = cnpy::npz_load(model_path);\n  loadParams(param_dict);\n}\n\ntemplate <bool USE_SHARED>\nvoid FNNHelper<USE_SHARED>::loadParams(const cnpy::npz_t& param_dict)\n{\n  loadParams(\"\", param_dict);\n}\n\ntemplate <bool USE_SHARED>\nvoid FNNHelper<USE_SHARED>::loadParams(std::string prefix, const cnpy::npz_t& param_dict, bool add_slash)\n{\n  int i, j, k;\n  std::string bias_name = \"\";\n  std::string weight_name = \"\";\n\n  if (add_slash && !prefix.empty() && *prefix.rbegin() != '/')\n  {\n    prefix.append(\"/\");\n  }\n\n  int counter = 1;\n  bias_name = prefix + \"dynamics_b\" + std::to_string(counter);\n  weight_name = prefix + \"dynamics_W\" + 
std::to_string(counter);\n\n  assert(static_cast<int>(param_dict.at(weight_name).num_vals) % static_cast<int>(param_dict.at(bias_name).num_vals) ==\n         0);\n\n  std::vector<int> layers = { static_cast<int>(param_dict.at(weight_name).num_vals) /\n                              static_cast<int>(param_dict.at(bias_name).num_vals) };\n  while (param_dict.find(bias_name) != param_dict.end())\n  {\n    layers.push_back(static_cast<int>(param_dict.at(bias_name).num_vals));\n    counter += 1;\n    bias_name = prefix + \"dynamics_b\" + std::to_string(counter);\n  }\n  // TODO more asserts for proper sizing of weights\n  // TODO only setup memory if it is different than stored\n  setupMemory(layers);\n\n  for (i = 0; i < NUM_LAYERS - 1; i++)\n  {\n    // NN index from 1\n    bias_name = prefix + \"dynamics_b\" + std::to_string(i + 1);\n    weight_name = prefix + \"dynamics_W\" + std::to_string(i + 1);\n\n    cnpy::NpyArray weight_i_raw = param_dict.at(weight_name);\n    cnpy::NpyArray bias_i_raw = param_dict.at(bias_name);\n    double* weight_i = weight_i_raw.data<double>();\n    double* bias_i = bias_i_raw.data<double>();\n\n    // copy over the weights\n    for (j = 0; j < net_structure_[i + 1]; j++)\n    {\n      for (k = 0; k < net_structure_[i]; k++)\n      {\n        theta_[stride_idcs_[2 * i] + j * net_structure_[i] + k] = weight_i[j * net_structure_[i] + k];\n      }\n    }\n    // copy over the bias\n    for (j = 0; j < net_structure_[i + 1]; j++)\n    {\n      theta_[stride_idcs_[2 * i + 1] + j] = bias_i[j];\n    }\n  }\n  for (int i = 0; i < NUM_PARAMS; i++)\n  {\n    assert(isfinite(theta_[i]));\n  }\n  changed_weights_ = true;\n  // Save parameters to GPU memory\n  paramsToDevice();\n}\n\ntemplate <bool USE_SHARED>\nvoid FNNHelper<USE_SHARED>::setupMemory(const std::vector<int>& layers)\n{\n  assert(layers.size() >= 2);\n\n  bool setupGPU = this->GPUMemStatus_;\n  // TODO should see if this is different or not\n  if (setupGPU)\n  {\n    freeCudaMem();\n  
}\n\n  NUM_LAYERS = layers.size();\n  LARGEST_LAYER = layers[0];\n  STRIDE_SIZE = (NUM_LAYERS - 1) * 2;\n  for (int i = 1; i < layers.size(); i++)\n  {\n    // the +1 here is from the bias\n    NUM_PARAMS += (layers[i - 1] + 1) * layers[i];\n    if (layers[i] > LARGEST_LAYER)\n    {\n      LARGEST_LAYER = layers[i];\n    }\n  }\n  INPUT_DIM = layers.front();\n  OUTPUT_DIM = layers.back();\n  // TODO allocate more memory so we can copy as float4's\n  if (theta_ != nullptr)\n  {\n    delete theta_;\n    delete net_structure_;\n    delete stride_idcs_;\n  }\n\n  theta_ = (float*)::operator new(sizeof(float) * NUM_PARAMS);\n  net_structure_ = (int*)::operator new(sizeof(int) * NUM_LAYERS);\n  stride_idcs_ = (int*)::operator new(sizeof(int) * STRIDE_SIZE);\n\n  memset(theta_, 0.0, sizeof(float) * NUM_PARAMS);\n  memset(net_structure_, -1, sizeof(int) * NUM_LAYERS);\n  memset(stride_idcs_, -1, sizeof(int) * STRIDE_SIZE);\n\n  for (int i = 0; i < NUM_LAYERS; i++)\n  {\n    net_structure_[i] = layers[i];\n  }\n\n  int stride = 0;\n  for (int i = 0; i < NUM_LAYERS - 1; i++)\n  {\n    stride_idcs_[2 * i] = stride;\n    stride += net_structure_[i + 1] * net_structure_[i];\n    stride_idcs_[2 * i + 1] = stride;\n    stride += net_structure_[i + 1];\n  }\n\n  PARAM_SIZE = (NUM_LAYERS + NUM_PARAMS + STRIDE_SIZE) * sizeof(float);\n\n  this->SHARED_MEM_REQUEST_GRD_BYTES = mppi::math::int_multiple_const(PARAM_SIZE * USE_SHARED, sizeof(float4));\n  this->SHARED_MEM_REQUEST_BLK_BYTES =\n      mppi::math::int_multiple_const(2 * LARGEST_LAYER * sizeof(float), sizeof(float4));\n\n  weighted_in_.resize(NUM_LAYERS - 1);\n\n  // zeros out every matrix\n  for (int i = 1; i < NUM_LAYERS; i++)\n  {\n    weighted_in_[i - 1] = Eigen::MatrixXf::Zero(net_structure_[i], 1);\n  }\n\n  if (setupGPU)\n  {\n    GPUSetup();\n  }\n}\n\ntemplate <bool USE_SHARED>\nvoid FNNHelper<USE_SHARED>::updateModel(const std::vector<float>& data)\n{\n  assert(data.size() == NUM_PARAMS);\n  std::vector<int> 
description(NUM_LAYERS);\n  for (int i = 0; i < NUM_LAYERS; i++)\n  {\n    description[i] = net_structure_[i];\n  }\n  updateModel(description, data);\n}\n\ntemplate <bool USE_SHARED>\nvoid FNNHelper<USE_SHARED>::updateModel(const std::vector<int>& description, const std::vector<float>& data)\n{\n  // TODO can change the description of the network\n  for (int i = 0; i < description.size(); i++)\n  {\n    if (description[i] != net_structure_[i])\n    {\n      throw std::invalid_argument(\"Invalid model trying to to be set for NN\");\n    }\n  }\n  for (int i = 0; i < NUM_LAYERS - 1; i++)\n  {\n    for (int j = 0; j < net_structure_[i + 1]; j++)\n    {\n      for (int k = 0; k < net_structure_[i]; k++)\n      {\n        theta_[stride_idcs_[2 * i] + j * net_structure_[i] + k] = data[stride_idcs_[2 * i] + j * net_structure_[i] + k];\n      }\n    }\n  }\n  for (int i = 0; i < NUM_LAYERS - 1; i++)\n  {\n    for (int j = 0; j < net_structure_[i + 1]; j++)\n    {\n      theta_[stride_idcs_[2 * i + 1] + j] = data[stride_idcs_[2 * i + 1] + j];\n    }\n  }\n  for (int i = 0; i < NUM_PARAMS; i++)\n  {\n    assert(isfinite(theta_[i]));\n  }\n  changed_weights_ = true;\n  if (this->GPUMemStatus_)\n  {\n    paramsToDevice();\n  }\n}\n\ntemplate <bool USE_SHARED>\nvoid FNNHelper<USE_SHARED>::freeCudaMem()\n{\n  if (this->GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaFree(weights_d_));\n    HANDLE_ERROR(cudaFree(network_d_));\n    this->GPUMemStatus_ = false;\n  }\n}\n\ntemplate <bool USE_SHARED>\nvoid FNNHelper<USE_SHARED>::GPUSetup()\n{\n  if (!this->GPUMemStatus_)\n  {\n    network_d_ = Managed::GPUSetup<FNNHelper<USE_SHARED>>(this);\n    HANDLE_ERROR(cudaMalloc((void**)&(this->weights_d_), PARAM_SIZE));\n\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->weights_d_), &(weights_d_), sizeof(float*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->theta_), &(weights_d_), sizeof(float*), 
cudaMemcpyHostToDevice,\n                                 this->stream_));\n    float* incr_ptr = (weights_d_ + NUM_PARAMS);\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->stride_idcs_), &(incr_ptr), sizeof(int*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    incr_ptr = (weights_d_ + NUM_PARAMS + STRIDE_SIZE);\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->net_structure_), &(incr_ptr), sizeof(int*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    changed_weights_ = true;\n    paramsToDevice();\n  }\n  else\n  {\n    this->logger_->debug(\"FNN GPU Memory Already set\\n\");\n  }\n}\n\ntemplate <bool USE_SHARED>\nvoid FNNHelper<USE_SHARED>::paramsToDevice()\n{\n  if (this->GPUMemStatus_ && changed_weights_)\n  {\n    HANDLE_ERROR(\n        cudaMemcpyAsync(this->weights_d_, theta_, NUM_PARAMS * sizeof(float), cudaMemcpyHostToDevice, this->stream_));\n    float* incr_ptr = weights_d_ + NUM_PARAMS;\n    HANDLE_ERROR(\n        cudaMemcpyAsync(incr_ptr, stride_idcs_, STRIDE_SIZE * sizeof(int), cudaMemcpyHostToDevice, this->stream_));\n    incr_ptr = weights_d_ + NUM_PARAMS + STRIDE_SIZE;\n    HANDLE_ERROR(\n        cudaMemcpyAsync(incr_ptr, net_structure_, NUM_LAYERS * sizeof(int), cudaMemcpyHostToDevice, this->stream_));\n    changed_weights_ = false;\n  }\n}\n\ntemplate <bool USE_SHARED>\nbool FNNHelper<USE_SHARED>::computeGrad(const Eigen::Ref<const Eigen::MatrixXf>& input, Eigen::Ref<Eigen::MatrixXf> A)\n{\n  // compute forward to see gradient values\n  Eigen::VectorXf output = getZeroOutputVector();\n  forward(input, output);\n  return computeGrad(A);\n}\n\ntemplate <bool USE_SHARED>\nbool FNNHelper<USE_SHARED>::computeGrad(Eigen::Ref<Eigen::MatrixXf> A)\n{\n  // Start backprop\n  Eigen::MatrixXf ip_delta = Eigen::MatrixXf::Identity(OUTPUT_DIM, OUTPUT_DIM);\n\n  // Main backprop loop\n  for (int i = NUM_LAYERS - 2; i > 0; i--)\n  {\n    Eigen::MatrixXf zp = weighted_in_[i - 1];\n    for 
(int j = 0; j < net_structure_[i]; j++)\n    {\n      zp(j) = mppi::nn::tanh_deriv(zp(j));\n    }\n    Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> cur_weights(\n        theta_ + stride_idcs_[2 * i], net_structure_[i + 1], net_structure_[i]);\n    ip_delta = ((cur_weights).transpose() * ip_delta).eval();\n    for (int j = 0; j < OUTPUT_DIM; j++)\n    {\n      ip_delta.col(j) = ip_delta.col(j).array() * zp.array();\n    }\n  }\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> cur_weights(\n      theta_ + stride_idcs_[0], net_structure_[1], net_structure_[0]);\n\n  ip_delta = (((cur_weights).transpose()) * ip_delta).transpose().eval();\n  for (int i = 0; i < INPUT_DIM; i++)\n  {\n    A.col(i) = ip_delta.col(i);\n  }\n  return true;\n}\n\ntemplate <bool USE_SHARED>\nvoid FNNHelper<USE_SHARED>::forward(const Eigen::Ref<const Eigen::MatrixXf>& input, Eigen::Ref<Eigen::MatrixXf> output)\n{\n  int i, j;\n  Eigen::MatrixXf acts = input;\n  for (i = 0; i < NUM_LAYERS - 1; i++)\n  {\n    Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> cur_weights(\n        theta_ + stride_idcs_[2 * i], net_structure_[i + 1], net_structure_[i]);\n    Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> cur_bias(\n        theta_ + stride_idcs_[2 * i + 1], net_structure_[i + 1], 1);\n    weighted_in_[i] = (cur_weights * acts + cur_bias).eval();\n    acts = Eigen::MatrixXf::Zero(net_structure_[i + 1], 1);\n    if (i < NUM_LAYERS - 2)\n    {  // Last layer doesn't apply any non-linearity\n      for (j = 0; j < net_structure_[i + 1]; j++)\n      {\n        acts(j) = mppi::nn::tanh((weighted_in_[i])(j));  // Nonlinear component.\n      }\n    }\n    else\n    {\n      for (j = 0; j < net_structure_[i + 1]; j++)\n      {\n        acts(j) = (weighted_in_[i])(j);\n      }\n    }\n  }\n  output = acts;\n}\n\ntemplate <bool USE_SHARED>\n__device__ float* 
FNNHelper<USE_SHARED>::initialize(float* theta_s)\n{\n  float* new_theta_s = theta_s;\n  if (USE_SHARED)\n  {\n    // if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && USE_SHARED != 0)\n    // {\n    //   memcpy(theta_s, theta_, NUM_PARAMS * sizeof(float));\n    //   new_theta_s += NUM_PARAMS;\n    //   memcpy(new_theta_s, stride_idcs_, STRIDE_SIZE * sizeof(int));\n    //   new_theta_s += STRIDE_SIZE;\n    //   memcpy(new_theta_s, net_structure_, NUM_LAYERS * sizeof(int));\n    //   new_theta_s += NUM_LAYERS;\n    // }\n    for (int i = blockDim.x * threadIdx.y + threadIdx.x; i < NUM_PARAMS; i += blockDim.x * blockDim.y)\n    {\n      theta_s[i] = theta_[i];\n    }\n    int* stride_idcs = (int*)(theta_s + NUM_PARAMS);\n    for (int i = threadIdx.y; i < STRIDE_SIZE; i += blockDim.y)\n    {\n      stride_idcs[i] = stride_idcs_[i];\n    }\n    int* net_structure = (int*)(theta_s + NUM_PARAMS + STRIDE_SIZE);\n    for (int i = threadIdx.y; i < NUM_LAYERS; i += blockDim.y)\n    {\n      net_structure[i] = net_structure_[i];\n    }\n    __syncthreads();\n  }\n  return new_theta_s;\n}\n\ntemplate <bool USE_SHARED>\n__device__ float* FNNHelper<USE_SHARED>::forward(float* input, float* theta_s, float* curr_act)\n{\n  float* next_act;\n  float* tmp_act;\n  float tmp;\n  float* W;\n  float* b;\n  uint tdy = threadIdx.y;\n  uint i, j, k;\n  uint tdx = threadIdx.x;\n  uint tdz = threadIdx.z;\n\n  float* theta = theta_;\n  int* stride_idcs = stride_idcs_;\n  int* net_structure = net_structure_;\n  if (USE_SHARED != 0)\n  {\n    theta = theta_s;\n    stride_idcs = (int*)(theta_s + NUM_PARAMS);\n    net_structure = (int*)(theta_s + NUM_PARAMS + STRIDE_SIZE);\n  }\n\n  next_act = &curr_act[LARGEST_LAYER];\n\n  // iterate through the part of the state that should be an input to the NN\n  if (input != nullptr)\n  {\n    for (i = tdy; i < INPUT_DIM; i += blockDim.y)\n    {\n      curr_act[i] = input[i];\n    }\n    __syncthreads();\n  }\n  // iterate through each layer\n  
for (i = 0; i < NUM_LAYERS - 1; i++)\n  {\n    W = &theta[stride_idcs[2 * i]];      // weights\n    b = &theta[stride_idcs[2 * i + 1]];  // biases\n\n    // for first non input layer until last layer this thread deals with\n    // calculates the next activation based on current\n    for (j = tdy; j < net_structure[i + 1]; j += blockDim.y)\n    {\n      tmp = 0;\n      // apply each neuron activation from current layer\n      for (k = 0; k < net_structure[i]; k++)\n      {\n        // No atomic add necessary.\n        tmp += W[j * net_structure[i] + k] * curr_act[k];\n      }\n      // add bias from next layer and neuron\n      tmp += b[j];\n      if (i < NUM_LAYERS - 2)\n      {\n        tmp = mppi::nn::tanh(tmp);\n      }\n      next_act[j] = tmp;\n    }\n    // Swap the two pointers\n    tmp_act = curr_act;\n    curr_act = next_act;\n    next_act = tmp_act;\n    __syncthreads();\n  }\n  return curr_act;\n}\n\ntemplate <bool USE_SHARED>\n__device__ float* FNNHelper<USE_SHARED>::forward(float* input, float* theta_s)\n{\n  uint tdx = threadIdx.x;\n  uint tdz = threadIdx.z;\n  float* curr_act =\n      &theta_s[SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float) + (2 * LARGEST_LAYER) * (blockDim.x * tdz + tdx)];\n\n  return forward(input, theta_s, curr_act);\n}\ntemplate <bool USE_SHARED>\n__device__ float* FNNHelper<USE_SHARED>::getInputLocation(float* theta_s)\n{\n  uint tdx = threadIdx.x;\n  uint tdz = threadIdx.z;\n  return theta_s + SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float) + (2 * LARGEST_LAYER) * (blockDim.x * tdz + tdx);\n}\n"
  },
  {
    "path": "include/mppi/utils/nn_helpers/fnn_helper.cuh",
    "content": "#ifndef MPPIGENERIC_FNN_HELPER_CUH\n#define MPPIGENERIC_FNN_HELPER_CUH\n\n#include \"meta_math.h\"\n#include <mppi/utils/math_utils.h>\n#include <mppi/utils/managed.cuh>\n#include <mppi/utils/file_utils.h>\n#include <cnpy.h>\n#include <atomic>\n#include <mppi/utils/activation_functions.cuh>\n\ntemplate <bool USE_SHARED = true>\nclass FNNHelper : public Managed\n{\npublic:\n  // EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  explicit FNNHelper<USE_SHARED>(const std::vector<int>& layers, cudaStream_t stream = 0);\n  explicit FNNHelper<USE_SHARED>(std::string, cudaStream_t stream = 0);\n  explicit FNNHelper<USE_SHARED>(const cnpy::npz_t& param_dict, cudaStream_t stream = 0);\n  explicit FNNHelper<USE_SHARED>(const cnpy::npz_t& param_dict, std::string prefix, cudaStream_t stream = 0);\n  ~FNNHelper();\n\n  void loadParams(const std::string& model_path);\n  void loadParams(const cnpy::npz_t& npz);\n  void loadParams(std::string prefix, const cnpy::npz_t& npz, bool add_slash = true);\n\n  void GPUSetup();\n\n  void updateModel(const std::vector<int>& description, const std::vector<float>& data);\n  void updateModel(const std::vector<float>& data);\n\n  void freeCudaMem();\n\n  void paramsToDevice();\n\n  bool computeGrad(Eigen::Ref<Eigen::MatrixXf> A);\n\n  bool computeGrad(const Eigen::Ref<const Eigen::MatrixXf>& input, Eigen::Ref<Eigen::MatrixXf> A);\n\n  void forward(const Eigen::Ref<const Eigen::MatrixXf>& input, Eigen::Ref<Eigen::MatrixXf> output);\n\n  __device__ float* initialize(float* theta_s);\n\n  __device__ float* forward(float* input, float* theta_s);\n  __device__ float* forward(float* input, float* theta_s, float* curr_act);\n\n  __host__ __device__ int* getNetStructurePtr() const\n  {\n    return net_structure_;\n  }\n  __host__ __device__ int* getStrideIdcsPtr() const\n  {\n    return stride_idcs_;\n  }\n  __host__ __device__ float* getThetaPtr() const\n  {\n    return theta_;\n  }\n\n  __host__ __device__ int getNumLayers() const\n  {\n    return 
this->NUM_LAYERS;\n  }\n  __host__ __device__ int getNumParams() const\n  {\n    return this->NUM_PARAMS;\n  }\n  __host__ __device__ int getLargestLayer() const\n  {\n    return this->LARGEST_LAYER;\n  }\n  __host__ __device__ int getStrideSize() const\n  {\n    return this->STRIDE_SIZE;\n  }\n\n  Eigen::VectorXf getZeroInputVector()\n  {\n    return Eigen::VectorXf::Zero(INPUT_DIM);\n  }\n  Eigen::VectorXf getZeroOutputVector()\n  {\n    return Eigen::VectorXf::Zero(OUTPUT_DIM);\n  }\n  Eigen::MatrixXf getZeroJacobianMatrix()\n  {\n    return Eigen::MatrixXf::Zero(OUTPUT_DIM, INPUT_DIM);\n  }\n\n  __device__ __host__ int getInputDim() const\n  {\n    return INPUT_DIM;\n  }\n  __host__ __device__ int getOutputDim() const\n  {\n    return OUTPUT_DIM;\n  }\n\n  __device__ float* getInputLocation(float* theta_s);\n\n  void setAllWeights(float input)\n  {\n    std::vector<float> vals(NUM_PARAMS);\n    std::fill(vals.begin(), vals.end(), input);\n    updateModel(vals);\n  }\n\n  // device pointer, null on the device\n  FNNHelper<USE_SHARED>* network_d_ = nullptr;\n\n  float* weights_d_ = nullptr;\n\nprivate:\n  int NUM_LAYERS = 0;     ///< Total number of layers (including in/out layer)\n  int LARGEST_LAYER = 0;  ///< Number of neurons in the largest layer(including in/out neurons)\n  int NUM_PARAMS = 0;     ///< Total number of model parameters;\n  int PARAM_SIZE = 0;     ///< Maximum size of the parameters stored in shared memory\n  int STRIDE_SIZE = 0;\n\n  int INPUT_DIM = 0;\n  int OUTPUT_DIM = 0;\n\n  // packed by all weights that connect layer 1 to layer 2 neuron 1, bias for all connections from layer 1 to layer 2\n  // then layer 2 neuron 2, etc\n  float* theta_ = nullptr;\n\n  // index into theta for weights and bias (layer 0 weights start, no bias in input layer, layer 1 weights start, layer1\n  // bias start...\n  int* stride_idcs_ = nullptr;\n\n  //[neurons in layer 1, neurons in layer 2, ...]\n  int* net_structure_ = nullptr;\n\n  std::atomic<bool> 
changed_weights_ = { false };\n\n  std::vector<Eigen::MatrixXf> weighted_in_; /// eigen aligned does not matter here since we have a variable size eigen matrix\n\n  void setupMemory(const std::vector<int>& layers);\n};\n\n#if __CUDACC__\n#include \"fnn_helper.cu\"\n#endif\n\n#endif  // MPPIGENERIC_FNN_HELPER_CUH\n"
  },
  {
    "path": "include/mppi/utils/nn_helpers/lstm_helper.cu",
    "content": "//\n// Created by jason on 8/20/22.\n//\n\n#include \"lstm_helper.cuh\"\n\ntemplate <bool USE_SHARED>\nLSTMHelper<USE_SHARED>::LSTMHelper(LSTMConfig& config, cudaStream_t stream)\n  : LSTMHelper<USE_SHARED>(config.input_dim, config.hidden_dim, config.output_layers, stream)\n{\n}\n\ntemplate <bool USE_SHARED>\nLSTMHelper<USE_SHARED>::LSTMHelper(int input_dim, int hidden_dim, std::vector<int>& output_layers, cudaStream_t stream)\n  : Managed(stream)\n{\n  output_nn_ = new FNNHelper<USE_SHARED>(output_layers, this->stream_);\n  setupMemory(input_dim, hidden_dim);\n}\n\ntemplate <bool USE_SHARED>\nLSTMHelper<USE_SHARED>::LSTMHelper(std::string path, cudaStream_t stream) : Managed(stream)\n{\n  loadParams(path);\n}\n\ntemplate <bool USE_SHARED>\nLSTMHelper<USE_SHARED>::LSTMHelper(const cnpy::npz_t& param_dict, std::string prefix, bool add_slash,\n                                   cudaStream_t stream)\n  : Managed(stream)\n{\n  loadParams(prefix, param_dict, add_slash);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::setupMemory(int input_dim, int hidden_dim)\n{\n  HIDDEN_DIM = hidden_dim;\n  INPUT_DIM = input_dim;\n  OUTPUT_DIM = output_nn_->getOutputDim();\n  assert(output_nn_->getInputDim() == INPUT_DIM + HIDDEN_DIM);\n\n  bool setupGPU = this->GPUMemStatus_;\n  if (setupGPU)\n  {\n    freeCudaMem();\n  }\n\n  HIDDEN_HIDDEN_SIZE = HIDDEN_DIM * HIDDEN_DIM;\n  INPUT_HIDDEN_SIZE = HIDDEN_DIM * INPUT_DIM;\n\n  // The initial cell and hidden does not need to be stored in shared memory\n  LSTM_PARAM_SIZE_BYTES = (HIDDEN_DIM * 4 + HIDDEN_HIDDEN_SIZE * 4 + INPUT_HIDDEN_SIZE * 4) * sizeof(float);\n  LSTM_SHARED_MEM_GRD_BYTES = mppi::math::int_multiple_const(LSTM_PARAM_SIZE_BYTES * USE_SHARED, sizeof(float4));\n\n  SHARED_MEM_REQUEST_GRD_BYTES = output_nn_->getGrdSharedSizeBytes() + LSTM_SHARED_MEM_GRD_BYTES;\n  SHARED_MEM_REQUEST_BLK_BYTES = output_nn_->getBlkSharedSizeBytes() +\n                                 mppi::math::int_multiple_const((2 
* HIDDEN_DIM) * sizeof(float), sizeof(float4));\n\n  hidden_state_ = Eigen::VectorXf(HIDDEN_DIM);\n  hidden_state_.setZero();\n  cell_state_ = Eigen::VectorXf(HIDDEN_DIM);\n  cell_state_.setZero();\n\n  if (weights_ != nullptr)\n  {\n    delete weights_;\n  }\n\n  // allocate all the memory dynamically\n  weights_ = (float*)::operator new(LSTM_PARAM_SIZE_BYTES + 2 * HIDDEN_DIM * sizeof(float));\n  W_im_ = weights_;\n  W_fm_ = weights_ + HIDDEN_HIDDEN_SIZE;\n  W_om_ = weights_ + 2 * HIDDEN_HIDDEN_SIZE;\n  W_cm_ = weights_ + 3 * HIDDEN_HIDDEN_SIZE;\n\n  W_ii_ = weights_ + 4 * HIDDEN_HIDDEN_SIZE;\n  W_fi_ = weights_ + 4 * HIDDEN_HIDDEN_SIZE + INPUT_HIDDEN_SIZE;\n  W_oi_ = weights_ + 4 * HIDDEN_HIDDEN_SIZE + 2 * INPUT_HIDDEN_SIZE;\n  W_ci_ = weights_ + 4 * HIDDEN_HIDDEN_SIZE + 3 * INPUT_HIDDEN_SIZE;\n\n  b_i_ = weights_ + 4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE;\n  b_f_ = weights_ + 4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE + HIDDEN_DIM;\n  b_o_ = weights_ + 4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE + 2 * HIDDEN_DIM;\n  b_c_ = weights_ + 4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE + 3 * HIDDEN_DIM;\n\n  initial_hidden_ = weights_ + 4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE + 4 * HIDDEN_DIM;\n  initial_cell_ = weights_ + 4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE + 5 * HIDDEN_DIM;\n\n  memset(weights_, 0.0f, LSTM_PARAM_SIZE_BYTES + HIDDEN_DIM * 2 * sizeof(float));\n\n  if (setupGPU)\n  {\n    GPUSetup();\n  }\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::updateLSTMInitialStates(const Eigen::Ref<const Eigen::VectorXf> hidden,\n                                                     const Eigen::Ref<const Eigen::VectorXf> cell)\n{\n  setHiddenState(hidden);\n  setCellState(cell);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::GPUSetup()\n{\n  output_nn_->GPUSetup();\n  if (!this->GPUMemStatus_)\n  {\n    network_d_ = Managed::GPUSetup<LSTMHelper<USE_SHARED>>(this);\n    
HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->output_nn_), &(this->output_nn_->network_d_),\n                                 sizeof(OUTPUT_FNN_T*), cudaMemcpyHostToDevice, this->stream_));\n    HANDLE_ERROR(cudaMalloc((void**)&(this->weights_d_), LSTM_PARAM_SIZE_BYTES + HIDDEN_DIM * 2 * sizeof(float)));\n\n    // copies all pointers to be right on the GPU side\n    float* incr_ptr = weights_d_;\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->weights_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->W_im_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    incr_ptr += HIDDEN_HIDDEN_SIZE;\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->W_fm_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    incr_ptr += HIDDEN_HIDDEN_SIZE;\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->W_om_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    incr_ptr += HIDDEN_HIDDEN_SIZE;\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->W_cm_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    incr_ptr += HIDDEN_HIDDEN_SIZE;\n\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->W_ii_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    incr_ptr += INPUT_HIDDEN_SIZE;\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->W_fi_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    incr_ptr += INPUT_HIDDEN_SIZE;\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->W_oi_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    incr_ptr += INPUT_HIDDEN_SIZE;\n    
HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->W_ci_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice,\n                                 this->stream_));\n    incr_ptr += INPUT_HIDDEN_SIZE;\n\n    HANDLE_ERROR(\n        cudaMemcpyAsync(&(this->network_d_->b_i_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice, this->stream_));\n    incr_ptr += HIDDEN_DIM;\n    HANDLE_ERROR(\n        cudaMemcpyAsync(&(this->network_d_->b_f_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice, this->stream_));\n    incr_ptr += HIDDEN_DIM;\n    HANDLE_ERROR(\n        cudaMemcpyAsync(&(this->network_d_->b_o_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice, this->stream_));\n    incr_ptr += HIDDEN_DIM;\n    HANDLE_ERROR(\n        cudaMemcpyAsync(&(this->network_d_->b_c_), &(incr_ptr), sizeof(float*), cudaMemcpyHostToDevice, this->stream_));\n    incr_ptr += HIDDEN_DIM;\n\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->initial_hidden_), &(incr_ptr), sizeof(float*),\n                                 cudaMemcpyHostToDevice, this->stream_));\n    incr_ptr += HIDDEN_DIM;\n    HANDLE_ERROR(cudaMemcpyAsync(&(this->network_d_->initial_cell_), &(incr_ptr), sizeof(float*),\n                                 cudaMemcpyHostToDevice, this->stream_));\n    paramsToDevice();\n  }\n  else\n  {\n    this->logger_->debug(\"LSTM GPU Memory already set\\n\");\n  }\n}\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::freeCudaMem()\n{\n  output_nn_->freeCudaMem();\n  if (this->GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaFree(network_d_));\n    HANDLE_ERROR(cudaFree(weights_d_));\n    this->GPUMemStatus_ = false;\n  }\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::paramsToDevice()\n{\n  if (this->GPUMemStatus_)\n  {\n    // copies entire params to device\n    HANDLE_ERROR(cudaMemcpyAsync(this->weights_d_, this->weights_,\n                                 LSTM_PARAM_SIZE_BYTES + HIDDEN_DIM * 2 * sizeof(float), cudaMemcpyHostToDevice,\n                               
  stream_));\n  }\n}\n\ntemplate <bool USE_SHARED>\n__device__ void LSTMHelper<USE_SHARED>::initialize(float* theta_s)\n{\n  this->initialize(theta_s, SHARED_MEM_REQUEST_BLK_BYTES, SHARED_MEM_REQUEST_GRD_BYTES, 0);\n}\n\ntemplate <bool USE_SHARED>\n__device__ void LSTMHelper<USE_SHARED>::initialize(float* theta_s, int blk_size, int grd_size, int blk_offset)\n{\n  this->initialize(theta_s, blk_size, grd_size, blk_offset, 0);\n}\n\ntemplate <bool USE_SHARED>\n__device__ void LSTMHelper<USE_SHARED>::initialize(float* theta_s, int blk_size, int grd_size, int blk_offset,\n                                                   int grd_offset)\n{\n  if (USE_SHARED)\n  {\n    // if (threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)\n    // {\n    //   memcpy(theta_s, weights_, LSTM_PARAM_SIZE_BYTES);\n    // }\n    for (int i = blockDim.x * threadIdx.y + threadIdx.x; i < LSTM_PARAM_SIZE_BYTES / sizeof(float);\n         i += blockDim.x * blockDim.y)\n    {\n      theta_s[i + grd_offset / sizeof(float)] = weights_[i];\n    }\n    output_nn_->initialize(theta_s + grd_offset / sizeof(float) + LSTM_SHARED_MEM_GRD_BYTES / sizeof(float));\n    __syncthreads();\n  }\n\n  // copies the initial cell and hidden state to the correct place\n  const int shift =\n      grd_size / sizeof(float) + blk_size * (threadIdx.x + blockDim.x * threadIdx.z) / sizeof(float) + blk_offset;\n  for (int i = threadIdx.y; i < HIDDEN_DIM; i += blockDim.y)\n  {\n    (theta_s + shift)[i] = (weights_ + LSTM_PARAM_SIZE_BYTES / sizeof(float))[i];\n    (theta_s + shift + HIDDEN_DIM)[i] = (weights_ + LSTM_PARAM_SIZE_BYTES / sizeof(float) + HIDDEN_DIM)[i];\n  }\n\n  // if (threadIdx.y == 0)\n  // {\n  //   memcpy(theta_s + shift, weights_ + LSTM_PARAM_SIZE_BYTES / sizeof(float), 2 * HIDDEN_DIM * sizeof(float));\n  // }\n  __syncthreads();\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::copyHiddenCellToDevice()\n{\n  if (this->GPUMemStatus_)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(this->weights_d_ 
+ 4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE + 4 * HIDDEN_DIM,\n                                 this->weights_ + 4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE + 4 * HIDDEN_DIM,\n                                 2 * HIDDEN_DIM * sizeof(float), cudaMemcpyHostToDevice, stream_));\n  }\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::updateOutputModel(const std::vector<int>& description, const std::vector<float>& data)\n{\n  output_nn_->updateModel(description, data);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::updateOutputModel(const std::vector<float>& data)\n{\n  output_nn_->updateModel(data);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::forward(const Eigen::Ref<const Eigen::VectorXf>& input)\n{\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_W_im(W_im_, HIDDEN_DIM,\n                                                                                             HIDDEN_DIM);\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_W_fm(W_fm_, HIDDEN_DIM,\n                                                                                             HIDDEN_DIM);\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_W_om(W_om_, HIDDEN_DIM,\n                                                                                             HIDDEN_DIM);\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_W_cm(W_cm_, HIDDEN_DIM,\n                                                                                             HIDDEN_DIM);\n\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_W_ii(W_ii_, HIDDEN_DIM,\n                                                                                             INPUT_DIM);\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_W_fi(W_fi_, HIDDEN_DIM,\n                        
                                                                     INPUT_DIM);\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_W_oi(W_oi_, HIDDEN_DIM,\n                                                                                             INPUT_DIM);\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_W_ci(W_ci_, HIDDEN_DIM,\n                                                                                             INPUT_DIM);\n\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_b_i(b_i_, HIDDEN_DIM, 1);\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_b_f(b_f_, HIDDEN_DIM, 1);\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_b_o(b_o_, HIDDEN_DIM, 1);\n  Eigen::Map<Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>> eig_b_c(b_c_, HIDDEN_DIM, 1);\n\n  Eigen::VectorXf g_i = eig_W_im * hidden_state_ + eig_W_ii * input + eig_b_i;\n  Eigen::VectorXf g_f = eig_W_fm * hidden_state_ + eig_W_fi * input + eig_b_f;\n  Eigen::VectorXf g_o = eig_W_om * hidden_state_ + eig_W_oi * input + eig_b_o;\n  Eigen::VectorXf g_c = eig_W_cm * hidden_state_ + eig_W_ci * input + eig_b_c;\n  g_i = g_i.unaryExpr(&mppi::nn::sigmoid);\n  g_f = g_f.unaryExpr(&mppi::nn::sigmoid);\n  g_o = g_o.unaryExpr(&mppi::nn::sigmoid);\n  g_c = g_c.unaryExpr(&mppi::nn::tanh);\n\n  Eigen::VectorXf c_next = g_i.cwiseProduct(g_c) + g_f.cwiseProduct(cell_state_);\n  Eigen::VectorXf h_next = g_o.cwiseProduct(c_next.unaryExpr(&mppi::nn::tanh));\n\n  hidden_state_ = h_next;\n  cell_state_ = c_next;\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::forward(const Eigen::Ref<const Eigen::VectorXf>& input, Eigen::Ref<Eigen::VectorXf> output)\n{\n  forward(input);\n  Eigen::VectorXf nn_input = output_nn_->getZeroInputVector();\n  for (int i = 0; i < HIDDEN_DIM; i++)\n  {\n    nn_input(i) 
= hidden_state_(i);\n  }\n  for (int i = 0; i < INPUT_DIM; i++)\n  {\n    nn_input(i + HIDDEN_DIM) = input(i);\n  }\n\n  output_nn_->forward(nn_input, output);\n}\n\ntemplate <bool USE_SHARED>\n__device__ float* LSTMHelper<USE_SHARED>::forward(float* input, float* theta_s)\n{\n  const int block_idx = (blockDim.x * threadIdx.z + threadIdx.x) * SHARED_MEM_REQUEST_BLK_BYTES / sizeof(float) +\n                        SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float);\n  return forward(input, theta_s, theta_s + block_idx);\n}\n\ntemplate <bool USE_SHARED>\n__device__ float* LSTMHelper<USE_SHARED>::forward(float* input, float* theta_s, float* block_ptr)\n{\n  float* c = &block_ptr[0];\n  float* g_o = &block_ptr[2 * HIDDEN_DIM];  // input gate\n  return forward(input, theta_s, c, g_o);\n}\n\ntemplate <bool USE_SHARED>\n__device__ float* LSTMHelper<USE_SHARED>::forward(float* input, float* theta_s, float* hidden_cell, float* block_ptr)\n{\n  // Weights\n  float* W_ii = this->W_ii_;\n  float* W_im = this->W_im_;\n  float* W_fi = this->W_fi_;\n  float* W_fm = this->W_fm_;\n  float* W_oi = this->W_oi_;\n  float* W_om = this->W_om_;\n  float* W_ci = this->W_ci_;\n  float* W_cm = this->W_cm_;\n\n  // Biases\n  float* b_i = this->b_i_;  // hidden_size\n  float* b_f = this->b_f_;  // hidden_size\n  float* b_o = this->b_o_;  // hidden_size\n  float* b_c = this->b_c_;  // hidden_size\n\n  if (USE_SHARED)\n  {\n    // Weights\n    W_im = &theta_s[0];\n    W_fm = &theta_s[HIDDEN_HIDDEN_SIZE];\n    W_om = &theta_s[2 * HIDDEN_HIDDEN_SIZE];\n    W_cm = &theta_s[3 * HIDDEN_HIDDEN_SIZE];\n    W_ii = &theta_s[4 * HIDDEN_HIDDEN_SIZE];\n    W_fi = &theta_s[4 * HIDDEN_HIDDEN_SIZE + INPUT_HIDDEN_SIZE];\n    W_oi = &theta_s[4 * HIDDEN_HIDDEN_SIZE + 2 * INPUT_HIDDEN_SIZE];\n    W_ci = &theta_s[4 * HIDDEN_HIDDEN_SIZE + 3 * INPUT_HIDDEN_SIZE];\n\n    // Biases\n    b_i = &theta_s[4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE];\n    b_f = &theta_s[4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE + 
HIDDEN_DIM];\n    b_o = &theta_s[4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE + 2 * HIDDEN_DIM];\n    b_c = &theta_s[4 * HIDDEN_HIDDEN_SIZE + 4 * INPUT_HIDDEN_SIZE + 3 * HIDDEN_DIM];\n  }\n\n  uint i, j;\n\n  // Intermediate outputs\n  // for each block we have prior cell/hidden state\n  float* const h = &hidden_cell[0];\n  float* const c = &hidden_cell[HIDDEN_DIM];\n\n  // each block has place to compute g_o, and input\n  float* const g_o = &block_ptr[0];  // output gate\n  float* const x = &block_ptr[HIDDEN_DIM];\n\n  // FNN needs space for input and activations\n  float* const output_act = g_o;\n\n  uint tdy = threadIdx.y;\n\n  // load input into theta_s\n  if (input != nullptr)\n  {\n    for (i = tdy; i < INPUT_DIM; i += blockDim.y)\n    {\n      x[i] = input[i];\n    }\n    __syncthreads();\n  }\n\n  float temp_g_i = 0;\n  float temp_g_f = 0;\n  float temp_g_o = 0;\n  float temp_cell_update = 0;\n\n  // apply each gate in parallel\n  for (i = tdy; i < HIDDEN_DIM; i += blockDim.y)\n  {\n    temp_g_i = 0;\n    temp_g_f = 0;\n    temp_g_o = 0;\n    temp_cell_update = 0;\n    for (j = 0; j < INPUT_DIM; j++)\n    {\n      int index = i * INPUT_DIM + j;\n      temp_g_i += W_ii[index] * x[j];\n      temp_g_f += W_fi[index] * x[j];\n      temp_g_o += W_oi[index] * x[j];\n      temp_cell_update += W_ci[index] * x[j];\n    }\n    for (j = 0; j < HIDDEN_DIM; j++)\n    {\n      int index = i * HIDDEN_DIM + j;\n      temp_g_i += W_im[index] * h[j];\n      temp_g_f += W_fm[index] * h[j];\n      temp_g_o += W_om[index] * h[j];\n      temp_cell_update += W_cm[index] * h[j];\n    }\n    temp_g_i += b_i[i];\n    temp_g_f += b_f[i];\n    temp_g_o += b_o[i];\n    temp_cell_update += b_c[i];\n\n    temp_g_i = mppi::nn::sigmoid(temp_g_i);\n    temp_g_f = mppi::nn::sigmoid(temp_g_f);\n    temp_cell_update = mppi::nn::tanh(temp_cell_update);\n\n    g_o[i] = mppi::nn::sigmoid(temp_g_o);\n\n    c[i] = temp_g_i * temp_cell_update + temp_g_f * c[i];\n  }\n  __syncthreads();\n\n  // 
copy computed hidden/cell state to theta_s\n  for (i = tdy; i < HIDDEN_DIM; i += blockDim.y)\n  {\n    h[i] = mppi::nn::tanh(c[i]) * g_o[i];  // actually using c_next intentionally\n    output_act[i] = h[i];\n  }\n\n  // copy input to activation\n  // for (i = tdy; i < INPUT_DIM; i += blockDim.y)\n  // {\n  //   output_act[i + HIDDEN_DIM] = x[i];\n  // }\n  __syncthreads();\n\n  return output_nn_->forward(nullptr, theta_s + LSTM_SHARED_MEM_GRD_BYTES / sizeof(float), output_act);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::resetHiddenCellCPU()\n{\n  for (int i = 0; i < HIDDEN_DIM; i++)\n  {\n    hidden_state_(i) = initial_hidden_[i];\n    cell_state_(i) = initial_cell_[i];\n  }\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::setHiddenState(const Eigen::Ref<const Eigen::VectorXf> hidden_state)\n{\n  for (int i = 0; i < HIDDEN_DIM; i++)\n  {\n    initial_hidden_[i] = hidden_state(i);\n    hidden_state_(i) = hidden_state(i);\n  }\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::setCellState(const Eigen::Ref<const Eigen::VectorXf> cell_state)\n{\n  for (int i = 0; i < HIDDEN_DIM; i++)\n  {\n    initial_cell_[i] = cell_state(i);\n    cell_state_(i) = cell_state(i);\n  }\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::loadParams(const std::string& model_path)\n{\n  if (!fileExists(model_path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << model_path.c_str();\n    exit(-1);\n  }\n  cnpy::npz_t param_dict = cnpy::npz_load(model_path);\n  loadParams(param_dict);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::loadParams(const cnpy::npz_t& param_dict)\n{\n  loadParams(\"\", param_dict);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMHelper<USE_SHARED>::loadParams(std::string prefix, const cnpy::npz_t& param_dict, bool add_slash)\n{\n  if (add_slash && !prefix.empty() && *prefix.rbegin() != '/')\n  {\n    prefix.append(\"/\");\n  }\n\n  if (param_dict.find(\"model/\" + prefix + 
\"lstm/weight_hh_l0\") != param_dict.end())\n  {\n    prefix.insert(0, \"model/\");\n  }\n\n  // assumes it has been unonioned\n  if (output_nn_ == nullptr)\n  {\n    output_nn_ = new FNNHelper<USE_SHARED>(param_dict, prefix + \"output/\", this->stream_);\n  }\n  else\n  {\n    output_nn_->loadParams(prefix + \"output/\", param_dict);\n  }\n\n  cnpy::NpyArray weight_hh_raw = param_dict.at(prefix + \"lstm/weight_hh_l0\");\n  cnpy::NpyArray bias_hh_raw = param_dict.at(prefix + \"lstm/bias_hh_l0\");\n  cnpy::NpyArray weight_ih_raw = param_dict.at(prefix + \"lstm/weight_ih_l0\");\n  cnpy::NpyArray bias_ih_raw = param_dict.at(prefix + \"lstm/bias_ih_l0\");\n  double* weight_hh = weight_hh_raw.data<double>();\n  double* bias_hh = bias_hh_raw.data<double>();\n  double* weight_ih = weight_ih_raw.data<double>();\n  double* bias_ih = bias_ih_raw.data<double>();\n\n  int input_dim = weight_ih_raw.shape[1];\n  int hidden_dim = bias_hh_raw.shape[0] / 4;\n  setupMemory(input_dim, hidden_dim);\n\n  for (int i = 0; i < HIDDEN_HIDDEN_SIZE; i++)\n  {\n    W_im_[i] = weight_hh[i];\n    W_fm_[i] = weight_hh[i + HIDDEN_HIDDEN_SIZE];\n    W_cm_[i] = weight_hh[i + 2 * HIDDEN_HIDDEN_SIZE];\n    W_om_[i] = weight_hh[i + 3 * HIDDEN_HIDDEN_SIZE];\n    assert(isfinite(W_im_[i]));\n    assert(isfinite(W_fm_[i]));\n    assert(isfinite(W_cm_[i]));\n    assert(isfinite(W_om_[i]));\n  }\n  for (int i = 0; i < INPUT_HIDDEN_SIZE; i++)\n  {\n    W_ii_[i] = weight_ih[i];\n    W_fi_[i] = weight_ih[i + INPUT_HIDDEN_SIZE];\n    W_ci_[i] = weight_ih[i + 2 * INPUT_HIDDEN_SIZE];\n    W_oi_[i] = weight_ih[i + 3 * INPUT_HIDDEN_SIZE];\n    assert(isfinite(W_ii_[i]));\n    assert(isfinite(W_fi_[i]));\n    assert(isfinite(W_ci_[i]));\n    assert(isfinite(W_oi_[i]));\n  }\n  for (int i = 0; i < HIDDEN_DIM; i++)\n  {\n    b_i_[i] = bias_hh[i] + bias_ih[i];\n    b_f_[i] = bias_hh[i + HIDDEN_DIM] + bias_ih[i + HIDDEN_DIM];\n    b_c_[i] = bias_hh[i + 2 * HIDDEN_DIM] + bias_ih[i + 2 * HIDDEN_DIM];\n    b_o_[i] = 
bias_hh[i + 3 * HIDDEN_DIM] + bias_ih[i + 3 * HIDDEN_DIM];\n    assert(isfinite(b_i_[i]));\n    assert(isfinite(b_f_[i]));\n    assert(isfinite(b_c_[i]));\n    assert(isfinite(b_o_[i]));\n  }\n\n  // Save parameters to GPU memory\n  paramsToDevice();\n}\n\ntemplate <bool USE_SHARED>\n__device__ float* LSTMHelper<USE_SHARED>::getInputLocation(float* theta_s)\n{\n  const int block_idx = (blockDim.x * threadIdx.z + threadIdx.x) * SHARED_MEM_REQUEST_BLK_BYTES / sizeof(float) +\n                        SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float);\n  float* x = theta_s + block_idx + 3 * HIDDEN_DIM;\n  return x;\n}\n\ntemplate <bool USE_SHARED>\n__device__ float* LSTMHelper<USE_SHARED>::getInputLocation(float* theta_s, const int grd_shift, const int blk_shift,\n                                                           const int shift)\n{\n  float* x = theta_s + grd_shift + blk_shift + shift + 3 * HIDDEN_DIM;\n  return x;\n}\n\ntemplate <bool USE_SHARED>\n__device__ float* LSTMHelper<USE_SHARED>::getHiddenCellLocation(float* theta_s, const int grd_shift,\n                                                                const int blk_shift, const int shift)\n{\n  float* x = theta_s + grd_shift + blk_shift + shift;\n  return x;\n}\n"
  },
  {
    "path": "include/mppi/utils/nn_helpers/lstm_helper.cuh",
    "content": "#ifndef MPPIGENERIC_LSTM_HELPER_CUH\n#define MPPIGENERIC_LSTM_HELPER_CUH\n\n#include \"fnn_helper.cuh\"\n\nstruct LSTMConfig {\n  int input_dim = -1;\n  int hidden_dim = -1;\n  std::vector<int> output_layers = {-1};\n\n};\n\ntemplate <bool USE_SHARED = true>\nclass LSTMHelper : public Managed\n{\npublic:\n  EIGEN_MAKE_ALIGNED_OPERATOR_NEW\n\n  typedef FNNHelper<USE_SHARED> OUTPUT_FNN_T;\n\n  // TODO create destructor that deallocates memory\n\n  LSTMHelper<USE_SHARED>(LSTMConfig& config, cudaStream_t stream = 0);\n  LSTMHelper<USE_SHARED>(int input_dim, int hidden_dim, std::vector<int>& output_layers, cudaStream_t stream = 0);\n  LSTMHelper<USE_SHARED>(std::string, cudaStream_t stream = 0);\n  LSTMHelper<USE_SHARED>(const cnpy::npz_t& param_dict, std::string prefix, bool add_slash = true,\n                         cudaStream_t stream = 0);\n  ~LSTMHelper()\n  {\n    freeCudaMem();\n  }\n\n  void loadParams(const std::string& model_path);\n  void loadParams(const cnpy::npz_t& npz);\n  void loadParams(std::string prefix, const cnpy::npz_t& npz, bool add_slash = true);\n\n  __device__ void initialize(float* theta_s);\n  __device__ void initialize(float* theta_s, int blk_size, int grd_size, int blk_offset);\n  __device__ void initialize(float* theta_s, int blk_size, int grd_size, int blk_offset, int grd_offset);\n\n  void GPUSetup();\n  void freeCudaMem();\n  void paramsToDevice();\n  void copyHiddenCellToDevice();\n\n  void updateOutputModel(const std::vector<int>& description, const std::vector<float>& data);\n  void updateOutputModel(const std::vector<float>& data);\n  void updateLSTMInitialStates(const Eigen::Ref<const Eigen::VectorXf> hidden,\n                               const Eigen::Ref<const Eigen::VectorXf> cell);\n\n  // bool computeGrad(Eigen::Ref<dfdx> A);\n  // bool computeGrad(const Eigen::Ref<const Eigen::MatrixXf>& input, Eigen::Ref<dfdx> A);\n\n  void forward(const Eigen::Ref<const Eigen::VectorXf>& input, Eigen::Ref<Eigen::VectorXf> 
output);\n  void forward(const Eigen::Ref<const Eigen::VectorXf>& input);\n  __device__ float* forward(float* input, float* theta_s);\n  __device__ float* forward(float* input, float* theta_s, float* block_ptr);\n  __device__ float* forward(float* input, float* theta_s, float* hidden_cell, float* block_ptr);\n  __device__ float* getInputLocation(float* theta_s);\n  __device__ float* getInputLocation(float* theta_s, const int grd_shift, const int blk_shift, const int shift);\n  __device__ float* getHiddenCellLocation(float* theta_s, const int grd_shift, const int blk_shift, const int shift);\n\n  void resetHiddenCellCPU();\n\n  void setAllValues(std::vector<float>& lstm, std::vector<float>& output)\n  {\n    assert(lstm.size() == getNumParams());\n    memcpy(this->weights_, lstm.data(), LSTM_PARAM_SIZE_BYTES + HIDDEN_DIM * 2 * sizeof(float));\n    resetHiddenCellCPU();\n    output_nn_->updateModel(output);\n  }\n\n  void setAllValues(float input)\n  {\n    for (int i = 0; i < LSTM_PARAM_SIZE_BYTES / sizeof(float) + 2 * HIDDEN_DIM; i++)\n    {\n      weights_[i] = input;\n    }\n    resetHiddenCellCPU();\n    output_nn_->setAllWeights(input);\n  }\n\n  Eigen::VectorXf getHiddenState()\n  {\n    return hidden_state_;\n  }\n  Eigen::VectorXf getCellState()\n  {\n    return cell_state_;\n  }\n\n  __host__ __device__ OUTPUT_FNN_T* getOutputModel() const\n  {\n    return output_nn_;\n  }\n\n  __host__ __device__ float* getOutputWeights() const\n  {\n    return output_nn_->weights_d_;\n  }\n\n  __host__ __device__ int getHiddenDim() const\n  {\n    return HIDDEN_DIM;\n  }\n  __host__ __device__ int getInputDim() const\n  {\n    return INPUT_DIM;\n  }\n  __host__ __device__ int getOutputDim() const\n  {\n    return OUTPUT_DIM;\n  }\n  __host__ __device__ int getHiddenHiddenSize() const\n  {\n    return HIDDEN_HIDDEN_SIZE;\n  }\n  __host__ __device__ int getInputHiddenSize() const\n  {\n    return INPUT_HIDDEN_SIZE;\n  }\n  __host__ __device__ int 
getOutputGrdSharedSizeBytes() const\n  {\n    return output_nn_->getGrdSharedSizeBytes();\n  }\n  __host__ __device__ int getLSTMGrdSharedSizeBytes() const\n  {\n    return LSTM_SHARED_MEM_GRD_BYTES;\n  }\n  __host__ __device__ int getBlkLSTMSharedSizeBytes() const\n  {\n    return (3 * HIDDEN_DIM + INPUT_DIM) * sizeof(float);\n  }\n  __host__ __device__ float* getWeights()\n  {\n    return weights_;\n  }\n\n  int getNumParams() const\n  {\n    return LSTM_PARAM_SIZE_BYTES / sizeof(float) + 2 * HIDDEN_DIM;\n  }\n\n  Eigen::VectorXf getZeroInputVector()\n  {\n    return Eigen::VectorXf::Zero(INPUT_DIM);\n  }\n  Eigen::VectorXf getZeroOutputVector()\n  {\n    return Eigen::VectorXf::Zero(OUTPUT_DIM);\n  }\n\n  void setHiddenState(const Eigen::Ref<const Eigen::VectorXf> hidden_state);\n  void setCellState(const Eigen::Ref<const Eigen::VectorXf> hidden_state);\n\n  // device pointer, null on the device\n  LSTMHelper<USE_SHARED>* network_d_ = nullptr;\n  float* weights_d_ = nullptr;\n\nprivate:\n  // params\n  OUTPUT_FNN_T* output_nn_ = nullptr;\n\n  int HIDDEN_DIM = 0;\n  int INPUT_DIM = 0;\n  int OUTPUT_DIM = 0;\n\n  int NUM_PARAMS = 1;\n\n  int LSTM_PARAM_SIZE_BYTES = 0;\n  int LSTM_SHARED_MEM_GRD_BYTES = 0;  ///< Amount of shared memory the LSTM needs per block\n\n  int HIDDEN_HIDDEN_SIZE = HIDDEN_DIM * HIDDEN_DIM;\n  int INPUT_HIDDEN_SIZE = HIDDEN_DIM * (INPUT_DIM);\n\n  float* W_im_ = nullptr;  ///< HIDDEN_HIDDEN_SIZE\n  float* W_fm_ = nullptr;  ///< HIDDEN_HIDDEN_SIZE\n  float* W_om_ = nullptr;  ///< HIDDEN_HIDDEN_SIZE\n  float* W_cm_ = nullptr;  ///< HIDDEN_HIDDEN_SIZE\n\n  float* W_ii_ = nullptr;  ///< INPUT_HIDDEN_SIZE\n  float* W_fi_ = nullptr;  ///< INPUT_HIDDEN_SIZE\n  float* W_oi_ = nullptr;  ///< INPUT_HIDDEN_SIZE\n  float* W_ci_ = nullptr;  ///< INPUT_HIDDEN_SIZE\n\n  float* b_i_ = nullptr;   ///< HIDDEN_SIZE\n  float* b_f_ = nullptr;   ///< HIDDEN_SIZE\n  float* b_o_ = nullptr;   ///< HIDDEN_SIZE\n  float* b_c_ = nullptr;   ///< HIDDEN_SIZE\n\n  float* 
initial_hidden_ = nullptr;\n  float* initial_cell_ = nullptr;\n\n  float* weights_ = nullptr;\n\n  Eigen::VectorXf hidden_state_;\n  Eigen::VectorXf cell_state_;\n\n  void setupMemory(int input_dim, int hidden_dim);\n};\n\n#if __CUDACC__\n#include \"lstm_helper.cu\"\n#endif\n\n#endif  // MPPIGENERIC_LSTM_HELPER_CUH\n"
  },
  {
    "path": "include/mppi/utils/nn_helpers/lstm_lstm_helper.cu",
    "content": "#include \"lstm_lstm_helper.cuh\"\n\ntemplate <bool USE_SHARED>\nLSTMLSTMHelper<USE_SHARED>::LSTMLSTMHelper(int init_input_dim, int init_hidden_dim,\n                                           std::vector<int>& init_output_layers, int input_dim, int hidden_dim,\n                                           std::vector<int>& output_layers, int init_len, cudaStream_t stream)\n{\n  init_len_ = init_len;\n  init_model_ = std::make_shared<LSTMHelper<false>>(init_input_dim, init_hidden_dim, init_output_layers);\n  lstm_ = std::make_shared<LSTMHelper<USE_SHARED>>(input_dim, hidden_dim, output_layers, stream);\n  assert(init_model_->getOutputDim() == lstm_->getHiddenDim() * 2);\n}\n\ntemplate <bool USE_SHARED>\nLSTMLSTMHelper<USE_SHARED>::LSTMLSTMHelper(std::string path, cudaStream_t stream)\n  : LSTMLSTMHelper<USE_SHARED>::LSTMLSTMHelper(path, \"\", stream)\n{\n}\n\ntemplate <bool USE_SHARED>\nLSTMLSTMHelper<USE_SHARED>::LSTMLSTMHelper(std::string path, std::string prefix, cudaStream_t stream)\n{\n  if (!fileExists(path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << path.c_str();\n    exit(-1);\n  }\n  cnpy::npz_t param_dict = cnpy::npz_load(path);\n\n  init_model_ = std::make_shared<LSTMHelper<false>>(param_dict, prefix + \"init_\", false);\n  lstm_ = std::make_shared<LSTMHelper<USE_SHARED>>(param_dict, prefix, true, stream);\n\n  if (param_dict.find(\"init_length\") != param_dict.end())  // this is the new api but assert is meaningless right now\n  {\n    // TODO remove once all models are updated to the lastest export API\n    assert(param_dict.find(\"init_length\") != param_dict.end());\n    init_len_ = static_cast<int>(param_dict.at(\"init_length\").data<double>()[0]) + 1;\n  }\n  else\n  {\n    // old api here\n    assert(param_dict.find(\"num_points\") != param_dict.end());\n    int num_points = param_dict.at(\"num_points\").data<int>()[0];\n    assert(param_dict.find(\"init_input\") != param_dict.end());\n    init_len_ = 
param_dict.at(\"init_input\").shape[0] / (num_points * init_model_->getInputDim());\n  }\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::initializeLSTM(const Eigen::Ref<const Eigen::MatrixXf>& buffer)\n{\n  assert(buffer.rows() == init_model_->getInputDim());\n  assert(buffer.cols() >= init_len_);\n  // reset hidden/cell state\n  init_model_->resetHiddenCellCPU();\n\n  int t = buffer.cols() - init_len_;\n  for (; t < buffer.cols() - 1; t++)\n  {\n    init_model_->forward(buffer.col(t));\n  }\n\n  // run full model with output at end\n  Eigen::VectorXf output = init_model_->getZeroOutputVector();\n  init_model_->forward(buffer.col(t), output);\n\n  // set the lstm initial hidden/cell to output\n  lstm_->setHiddenState(output.head(lstm_->getHiddenDim()));\n  lstm_->setCellState(output.tail(lstm_->getHiddenDim()));\n\n  // only copies the hidden/cell states\n  lstm_->copyHiddenCellToDevice();\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::loadParamsInit(const std::string& model_path)\n{\n  init_model_->loadParams(model_path);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::loadParamsInit(const cnpy::npz_t& npz)\n{\n  init_model_->loadParams(npz);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::loadParamsLSTM(const std::string& model_path)\n{\n  lstm_->loadParams(model_path);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::loadParamsLSTM(const cnpy::npz_t& npz)\n{\n  lstm_->loadParams(npz);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::loadParams(std::string prefix, std::string model_path)\n{\n  if (!prefix.empty() && *prefix.rbegin() != '/')\n  {\n    prefix.append(\"/\");\n  }\n\n  if (!fileExists(model_path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << model_path.c_str();\n    exit(-1);\n  }\n  cnpy::npz_t param_dict = cnpy::npz_load(model_path);\n\n  lstm_->loadParams(prefix, param_dict);\n  init_model_->loadParams(prefix 
+ \"init_\", param_dict, false);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::GPUSetup()\n{\n  lstm_->GPUSetup();\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::freeCudaMem()\n{\n  lstm_->freeCudaMem();\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::forward(const Eigen::Ref<const Eigen::VectorXf>& input,\n                                         Eigen::Ref<Eigen::VectorXf> output)\n{\n  lstm_->forward(input, output);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::updateOutputModel(const std::vector<int>& description, const std::vector<float>& data)\n{\n  lstm_->updateOutputModel(description, data);\n}\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::updateOutputModelInit(const std::vector<int>& description,\n                                                       const std::vector<float>& data)\n{\n  init_model_->updateOutputModel(description, data);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::updateOutputModel(const std::vector<float>& data)\n{\n  lstm_->updateOutputModel(data);\n}\n\ntemplate <bool USE_SHARED>\nvoid LSTMLSTMHelper<USE_SHARED>::updateOutputModelInit(const std::vector<float>& data)\n{\n  init_model_->updateOutputModel(data);\n}\n\ntemplate <bool USE_SHARED>\nstd::shared_ptr<LSTMHelper<false>> LSTMLSTMHelper<USE_SHARED>::getInitModel()\n{\n  return init_model_;\n}\n\ntemplate <bool USE_SHARED>\nstd::shared_ptr<LSTMHelper<USE_SHARED>> LSTMLSTMHelper<USE_SHARED>::getLSTMModel()\n{\n  return lstm_;\n}\n"
  },
  {
    "path": "include/mppi/utils/nn_helpers/lstm_lstm_helper.cuh",
    "content": "//\n// Created by jason on 8/23/22.\n//\n\n#ifndef MPPIGENERIC_LSTM_LSTM_HELPER_CUH\n#define MPPIGENERIC_LSTM_LSTM_HELPER_CUH\n\n#include \"lstm_helper.cuh\"\n\nstruct LSTMLSTMConfig\n{\n  LSTMConfig init_config;\n  LSTMConfig pred_config;\n  int init_len = 0;\n};\n\ntemplate <bool USE_SHARED = true>\nclass LSTMLSTMHelper\n{\npublic:\n  LSTMLSTMHelper<USE_SHARED>(LSTMLSTMConfig config, cudaStream_t stream = 0)\n    : LSTMLSTMHelper<USE_SHARED>(config.init_config.input_dim, config.init_config.hidden_dim,\n                                 config.init_config.output_layers, config.pred_config.input_dim,\n                                 config.pred_config.hidden_dim, config.pred_config.output_layers, config.init_len,\n                                 stream)\n  {\n  }\n  LSTMLSTMHelper<USE_SHARED>(LSTMConfig init_config, LSTMConfig pred_config, int init_len, cudaStream_t stream = 0)\n    : LSTMLSTMHelper<USE_SHARED>(init_config.input_dim, init_config.hidden_dim, init_config.output_layers,\n                                 pred_config.input_dim, pred_config.hidden_dim, pred_config.output_layers, init_len,\n                                 stream)\n  {\n  }\n  LSTMLSTMHelper<USE_SHARED>(int init_input_dim, int init_hidden_dim, std::vector<int>& init_output_layers,\n                             int input_dim, int hidden_dim, std::vector<int>& output_layers, int init_len,\n                             cudaStream_t = 0);\n  // LSTMLSTMHelper<USE_SHARED>(std::string init_path, std::string lstm_path, cudaStream_t = 0);\n  LSTMLSTMHelper<USE_SHARED>(std::string path, std::string prefix, cudaStream_t = 0);\n  LSTMLSTMHelper<USE_SHARED>(std::string path, cudaStream_t = 0);\n\n  void loadParams(std::string prefix, std::string path);\n\n  void loadParamsInit(const std::string& model_path);\n  void loadParamsInit(const cnpy::npz_t& npz);\n\n  void loadParamsLSTM(const std::string& model_path);\n  void loadParamsLSTM(const cnpy::npz_t& npz);\n\n  void 
updateOutputModelInit(const std::vector<int>& description, const std::vector<float>& data);\n  void updateOutputModel(const std::vector<int>& description, const std::vector<float>& data);\n\n  void updateOutputModelInit(const std::vector<float>& data);\n  void updateOutputModel(const std::vector<float>& data);\n\n  void GPUSetup();\n  void freeCudaMem();\n\n  Eigen::VectorXf getZeroInputVector()\n  {\n    return lstm_->getZeroInputVector();\n  }\n  Eigen::VectorXf getZeroOutputVector()\n  {\n    return lstm_->getZeroOutputVector();\n  }\n\n  void resetInitHiddenCPU()\n  {\n    init_model_->resetHiddenCellCPU();\n  }\n  void resetLSTMHiddenCellCPU()\n  {\n    lstm_->resetHiddenCellCPU();\n  }\n\n  Eigen::MatrixXf getEmptyBufferMatrix()\n  {\n    return Eigen::MatrixXf(init_model_->getInputDim(), init_len_);\n  }\n\n  Eigen::MatrixXf getEmptyBufferMatrix(int length)\n  {\n    return Eigen::MatrixXf(init_model_->getInputDim(), length);\n  }\n\n  void initializeLSTM(const Eigen::Ref<const Eigen::MatrixXf>& buffer);\n\n  void forward(const Eigen::Ref<const Eigen::VectorXf>& input, Eigen::Ref<Eigen::VectorXf> output);\n\n  std::shared_ptr<LSTMHelper<false>> getInitModel();\n  std::shared_ptr<LSTMHelper<USE_SHARED>> getLSTMModel();\n  LSTMHelper<USE_SHARED>* getLSTMDevicePtr()\n  {\n    return lstm_->network_d_;\n  }\n\n  int getInitLen()\n  {\n    return init_len_;\n  }\n\nprivate:\n  std::shared_ptr<LSTMHelper<false>> init_model_ = nullptr;\n  std::shared_ptr<LSTMHelper<USE_SHARED>> lstm_ = nullptr;\n  int init_len_ = -1;\n};\n\n#if __CUDACC__\n#include \"lstm_lstm_helper.cu\"\n#endif\n\n#endif  // MPPIGENERIC_LSTM_LSTM_HELPER_CUH\n"
  },
  {
    "path": "include/mppi/utils/nn_helpers/meta_math.h",
    "content": "/**********************************************\n * @file meta_math.h\n * @author Grady Williams <gradyrw@gmail.com>\n * @date May 24, 2017\n * @copyright 2017 Georgia Institute of Technology\n * @brief template arithmetic for figuring out how\n * much memory to allocate for neural network on GPU\n ***********************************************/\n\n#ifndef MPPIGENERIC_META_MATH_H\n#define MPPIGENERIC_META_MATH_H\n\ntemplate <typename... Args>\nconstexpr int input_dim(int first, Args... args)\n{\n  return first;\n}\n\ntemplate <typename... Args>\nconstexpr int output_dim(int last)\n{\n  return last;\n}\n\ntemplate <typename... Args>\nconstexpr int output_dim(int first, Args... args)\n{\n  return output_dim(args...);\n}\n\ntemplate <typename... Args>\nconstexpr int param_counter(int first)\n{\n  return first;\n}\n\ntemplate <typename... Args>\nconstexpr int param_counter(int first, int next)\n{\n  return (first + 1) * next;\n}\n\ntemplate <typename... Args>\nconstexpr int param_counter(int first, int next, Args... args)\n{\n  return (first + 1) * next + param_counter(next, args...);\n}\n\ntemplate <typename... Args>\nconstexpr int layer_counter(int first)\n{\n  return 1;\n}\n\ntemplate <typename... Args>\nconstexpr int layer_counter(int first, Args... args)\n{\n  return 1 + layer_counter(args...);\n}\n\ntemplate <typename... Args>\nconstexpr int neuron_counter(int first)\n{\n  return first;\n}\n\ntemplate <typename... Args>\nconstexpr int neuron_counter(int first, Args... args)\n{\n  return (first > neuron_counter(args...)) ? first : neuron_counter(args...);\n}\n\n#endif  // MPPIGENERIC_META_MATH_H\n"
  },
  {
    "path": "include/mppi/utils/numerical_integration.h",
    "content": "//\n// Created by mgandhi3 on 7/16/21.\n//\n\n#ifndef MPPIGENERIC_NUMERICAL_INTEGRATION_H\n#define MPPIGENERIC_NUMERICAL_INTEGRATION_H\n\n#include <Eigen/Dense>\n\ntemplate <class DYN>\nvoid rk4integrate(DYN* dynamics, float dt, const Eigen::Ref<typename DYN::state_array>& x_k,\n                  const Eigen::Ref<typename DYN::control_array>& u_k, Eigen::Ref<typename DYN::state_array> x_kp1)\n{\n  // Assume a zero order hold on the control\n  typename DYN::state_array k1, k2, k3, k4, x_next1, x_next2, x_next3, x_next4;\n  typename DYN::output_array output;\n  dynamics->step(x_k, x_next1, k1, u_k, output, 0, dt / 2);\n  dynamics->step(x_next1, x_next2, k2, u_k, output, 0, dt / 2);\n  dynamics->step(x_next2, x_next3, k3, u_k, output, 0, dt);\n  dynamics->step(x_next3, x_next4, k4, u_k, output, 0, dt / 2);\n  // dynamics->computeStateDeriv(x_k, u_k, k1);\n  // dynamics->computeStateDeriv(x_k + k1 * dt / 2, u_k, k2);\n  // dynamics->computeStateDeriv(x_k + k2 * dt / 2, u_k, k3);\n  // dynamics->computeStateDeriv(x_k + k3 * dt, u_k, k4);\n  x_kp1 = x_k + (k1 + 2 * k2 + 2 * k3 + k4) * dt / 6;\n}\n\n#endif  // MPPIGENERIC_NUMERICAL_INTEGRATION_H\n"
  },
  {
    "path": "include/mppi/utils/parallel_utils.cuh",
    "content": "//\n// Created by Bogdan on 8/20/23.\n//\n#pragma once\n#include <mppi/utils/cuda_math_utils.cuh>\n\nnamespace mppi\n{\nnamespace p1  // parallelize to 1 index and step\n{\nenum class Parallel1Dir : int\n{\n  THREAD_X = 0,\n  THREAD_Y,\n  THREAD_Z,\n  THREAD_XY,\n  THREAD_YX,\n  THREAD_XZ,\n  THREAD_ZX,\n  THREAD_YZ,\n  THREAD_ZY,\n  THREAD_XYZ,\n  GLOBAL_X,\n  GLOBAL_Y,\n  GLOBAL_Z,\n  NONE,\n};\n\ntemplate <Parallel1Dir P_DIR>\ninline __host__ __device__ void getParallel1DIndex(int& p_index, int& p_step);\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::THREAD_X>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.x;\n  p_step = blockDim.x;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::THREAD_Y>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.y;\n  p_step = blockDim.y;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::THREAD_Z>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.z;\n  p_step = blockDim.z;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::THREAD_XY>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.x + blockDim.x * threadIdx.y;\n  p_step = blockDim.x * blockDim.y;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::THREAD_XZ>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.x + blockDim.x * threadIdx.z;\n  p_step = blockDim.x * blockDim.z;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::THREAD_YX>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.y + 
blockDim.y * threadIdx.x;\n  p_step = blockDim.y * blockDim.x;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::THREAD_YZ>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.y + blockDim.y * threadIdx.z;\n  p_step = blockDim.y * blockDim.z;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::THREAD_ZX>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.z + blockDim.z * threadIdx.x;\n  p_step = blockDim.z * blockDim.x;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::THREAD_ZY>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.z + blockDim.z * threadIdx.y;\n  p_step = blockDim.z * blockDim.y;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::THREAD_XYZ>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);\n  p_step = blockDim.x * blockDim.y * blockDim.z;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::GLOBAL_X>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.x + blockDim.x * blockIdx.x;\n  p_step = gridDim.x * blockDim.x;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::GLOBAL_Y>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  p_index = threadIdx.y + blockDim.y * blockIdx.y;\n  p_step = gridDim.y * blockDim.y;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::GLOBAL_Z>(int& p_index, int& p_step)\n{\n#ifdef __CUDA_ARCH__\n  
p_index = threadIdx.z + blockDim.z * blockIdx.z;\n  p_step = gridDim.z * blockDim.z;\n#else\n  p_index = 0;\n  p_step = 1;\n#endif\n}\n\ntemplate <>\ninline __host__ __device__ void getParallel1DIndex<Parallel1Dir::NONE>(int& p_index, int& p_step)\n{\n  p_index = 0;\n  p_step = 1;\n}\n\ntemplate <Parallel1Dir P_DIR = Parallel1Dir::THREAD_Y, class T = float>\ninline __device__ void loadArrayParallel(T* __restrict__ a1, const int off1, const T* __restrict__ a2, const int off2,\n                                         const int N)\n{\n  int p_index, p_step;\n  getParallel1DIndex<P_DIR>(p_index, p_step);\n  if (N % 4 == 0 && sizeof(type4<T>) <= 16 && off1 % 4 == 0 && off2 % 4 == 0)\n  {\n    for (int i = p_index; i < N / 4; i += p_step)\n    {\n      reinterpret_cast<type4<T>*>(&a1[off1])[i] = reinterpret_cast<const type4<T>*>(&a2[off2])[i];\n    }\n  }\n  else if (N % 2 == 0 && sizeof(type2<T>) <= 16 && off1 % 2 == 0 && off2 % 2 == 0)\n  {\n    for (int i = p_index; i < N / 2; i += p_step)\n    {\n      reinterpret_cast<type2<T>*>(&a1[off1])[i] = reinterpret_cast<const type2<T>*>(&a2[off2])[i];\n    }\n  }\n  else\n  {\n    for (int i = p_index; i < N; i += p_step)\n    {\n      a1[off1 + i] = a2[off2 + i];\n    }\n  }\n}\n\ntemplate <int N, Parallel1Dir P_DIR = Parallel1Dir::THREAD_Y, class T = float>\ninline __device__ void loadArrayParallel(T* __restrict__ a1, const int off1, const T* __restrict__ a2, const int off2)\n{\n  loadArrayParallel<P_DIR, T>(a1, off1, a2, off2, N);\n}\n}  // namespace p1\n\nnamespace p2  // parallelize using 2 indices and steps\n{\nenum class Parallel2Dir : int\n{\n  THREAD_XY = 0,\n  THREAD_XZ,\n  THREAD_YZ,\n  THREAD_YX,\n  THREAD_ZX,\n  THREAD_ZY,\n  NONE\n};\n\ntemplate <Parallel2Dir P_DIR>\ninline __host__ __device__ void getParallel2DIndex(int& p1_index, int& p2_index, int& p1_step, int& p2_step)\n{\n#ifndef __CUDA_ARCH__\n  p1_index = 0;\n  p2_index = 0;\n  p1_step = 1;\n  p2_step = 1;\n#endif\n}\n\ntemplate <>\ninline __device__ 
void getParallel2DIndex<Parallel2Dir::THREAD_XY>(int& p1_index, int& p2_index, int& p1_step,\n                                                                   int& p2_step)\n{\n#ifdef __CUDA_ARCH__\n  p1_index = threadIdx.x;\n  p1_step = blockDim.x;\n  p2_index = threadIdx.y;\n  p2_step = blockDim.y;\n#else\n  p1_index = 0;\n  p2_index = 0;\n  p1_step = 1;\n  p2_step = 1;\n#endif\n}\n\ntemplate <>\ninline __device__ void getParallel2DIndex<Parallel2Dir::THREAD_YZ>(int& p1_index, int& p2_index, int& p1_step,\n                                                                   int& p2_step)\n{\n#ifdef __CUDA_ARCH__\n  p1_index = threadIdx.y;\n  p1_step = blockDim.y;\n  p2_index = threadIdx.z;\n  p2_step = blockDim.z;\n#else\n  p1_index = 0;\n  p2_index = 0;\n  p1_step = 1;\n  p2_step = 1;\n#endif\n}\n\ntemplate <>\ninline __device__ void getParallel2DIndex<Parallel2Dir::THREAD_XZ>(int& p1_index, int& p2_index, int& p1_step,\n                                                                   int& p2_step)\n{\n#ifdef __CUDA_ARCH__\n  p1_index = threadIdx.x;\n  p1_step = blockDim.x;\n  p2_index = threadIdx.z;\n  p2_step = blockDim.z;\n#else\n  p1_index = 0;\n  p2_index = 0;\n  p1_step = 1;\n  p2_step = 1;\n#endif\n}\n\ntemplate <>\ninline __device__ void getParallel2DIndex<Parallel2Dir::THREAD_YX>(int& p1_index, int& p2_index, int& p1_step,\n                                                                   int& p2_step)\n{\n#ifdef __CUDA_ARCH__\n  p1_index = threadIdx.y;\n  p1_step = blockDim.y;\n  p2_index = threadIdx.x;\n  p2_step = blockDim.x;\n#else\n  p1_index = 0;\n  p2_index = 0;\n  p1_step = 1;\n  p2_step = 1;\n#endif\n}\n\ntemplate <>\ninline __device__ void getParallel2DIndex<Parallel2Dir::THREAD_ZY>(int& p1_index, int& p2_index, int& p1_step,\n                                                                   int& p2_step)\n{\n#ifdef __CUDA_ARCH__\n  p1_index = threadIdx.z;\n  p1_step = blockDim.z;\n  p2_index = threadIdx.y;\n  p2_step = 
blockDim.y;\n#else\n  p1_index = 0;\n  p2_index = 0;\n  p1_step = 1;\n  p2_step = 1;\n#endif\n}\n\ntemplate <>\ninline __device__ void getParallel2DIndex<Parallel2Dir::THREAD_ZX>(int& p1_index, int& p2_index, int& p1_step,\n                                                                   int& p2_step)\n{\n#ifdef __CUDA_ARCH__\n  p1_index = threadIdx.z;\n  p1_step = blockDim.z;\n  p2_index = threadIdx.x;\n  p2_step = blockDim.x;\n#else\n  p1_index = 0;\n  p2_index = 0;\n  p1_step = 1;\n  p2_step = 1;\n#endif\n}\n\ntemplate <>\ninline __device__ void getParallel2DIndex<Parallel2Dir::NONE>(int& p1_index, int& p2_index, int& p1_step, int& p2_step)\n{\n  p1_index = 0;\n  p1_step = 1;\n  p2_index = 0;\n  p2_step = 1;\n}\n}  // namespace p2\n}  // namespace mppi\n"
  },
  {
    "path": "include/mppi/utils/risk_utils.cu",
    "content": "#include <mppi/utils/risk_utils.cuh>\n\n// #include <thrust/device_vector.h>\n// #include <thrust/sort.h>\n\n#include <algorithm>\n\nnamespace mppi\n{\ntemplate <class T = float>\n__host__ __device__ void insertionSort(T* __restrict__ array, const int N)\n{\n  T temp;\n  int j;\n  for (int i = 1; i < N; i++)\n  {\n    temp = array[i];\n    j = i - 1;\n    while (j >= 0 && array[j] > temp)\n    {\n      array[j + 1] = array[j];\n      --j;\n    }\n    array[j + 1] = temp;\n  }\n}\n\n__host__ __device__ float RiskMeasure::var(float* __restrict__ costs, const int num_costs, float alpha)\n{\n  float cost = 0.0f;\n#ifdef __CUDA_ARCH__\n  // thrust::device_ptr<float> thrust_ptr = thrust::device_pointer_cast(costs);\n  // thrust::sort(thrust::seq, thrust_ptr, thrust_ptr + num_costs);\n  insertionSort<>(costs, num_costs);\n#else\n  std::sort(costs, costs + num_costs);\n#endif\n  float h_idx = h_index(num_costs, alpha);\n  int next_idx = min((int)ceilf(h_idx), num_costs - 1);\n  int prev_idx = max((int)floorf(h_idx), 0);\n  cost = costs[prev_idx] + (h_idx - prev_idx) * (costs[next_idx] - costs[prev_idx]);\n  return cost;\n}\n\n__host__ __device__ float RiskMeasure::cvar(float* __restrict__ costs, const int num_costs, float alpha)\n{\n  float cost = 0.0f;\n  float value_at_risk = var(costs, num_costs, alpha);  // also sorts costs\n  int num_costs_above = 1;\n  float sum_costs_above = value_at_risk;\n  float h_idx = h_index(num_costs, alpha);\n  for (int i = ceilf(h_idx); i < num_costs; i++)\n  {\n    num_costs_above++;\n    sum_costs_above += costs[i];\n  }\n  cost = sum_costs_above / num_costs_above;\n  return cost;\n}\n}  // namespace mppi\n"
  },
  {
    "path": "include/mppi/utils/risk_utils.cuh",
    "content": "#pragma once\n\nnamespace mppi\n{\nclass RiskMeasure\n{\npublic:\n  enum FUNC_TYPE : int\n  {\n    MAX = 0,\n    MIN,\n    VAR,\n    CVAR,\n    MEAN,\n    MEDIAN,\n    NUM_FUNCS\n  };\n\n  FUNC_TYPE func_ = CVAR;\n  float alpha_ = 0.8;\n\n  __host__ __device__ float shaping_func(float* __restrict__ costs, const int num_costs)\n  {\n    return shaping_func(costs, num_costs, func_, alpha_);\n  }\n\n  static __host__ __device__ float shaping_func(float* __restrict__ costs, const int num_costs,\n                                                const FUNC_TYPE type = MEAN, const float risk_tolerance = 0.5f)\n  {\n    float cost = 0.0f;\n    if (num_costs == 1)\n    {\n      return costs[0];\n    }\n    switch (type)\n    {\n      case CVAR:\n        cost = cvar(costs, num_costs, risk_tolerance);\n        break;\n      case MAX:\n        cost = max_measure(costs, num_costs);\n        break;\n      case MEDIAN:\n        cost = var(costs, num_costs, 0.5f);\n        break;\n      case MIN:\n        cost = min_measure(costs, num_costs);\n        break;\n      case VAR:\n        cost = var(costs, num_costs, risk_tolerance);\n        break;\n      default: // go to mean case\n      case MEAN:\n        cost = mean_measure(costs, num_costs);\n        break;\n    }\n    return cost;\n  }\n\n  static __host__ __device__ float max_measure(const float* __restrict__ costs, const int num_costs)\n  {\n    float max_cost = costs[0];\n    for (int i = 1; i < num_costs; i++)\n    {\n      if (costs[i] > max_cost)\n      {\n        max_cost = costs[i];\n      }\n    }\n    return max_cost;\n  }\n\n  static __host__ __device__ float min_measure(const float* __restrict__ costs, const int num_costs)\n  {\n    float min_cost = costs[0];\n    for (int i = 1; i < num_costs; i++)\n    {\n      if (costs[i] < min_cost)\n      {\n        min_cost = costs[i];\n      }\n    }\n    return min_cost;\n  }\n\n  static __host__ __device__ float mean_measure(const float* __restrict__ costs, 
const int num_costs)\n  {\n    float cost = 0.0f;\n    for (int i = 0; i < num_costs; i++)\n    {\n      cost += costs[i];\n    }\n    return cost / num_costs;\n  }\n\n  static __host__ __device__ float h_index(const int num_costs, const float alpha)\n  {\n    return alpha * (num_costs - 1);\n  }\n\n  static __host__ __device__ float var(float* __restrict__ costs, const int num_costs, float alpha);\n\n  static __host__ __device__ float cvar(float* __restrict__ costs, const int num_costs, float alpha);\n};\n}  // namespace mppi\n\n#ifdef __CUDACC__\n#include \"risk_utils.cu\"\n#endif\n"
  },
  {
    "path": "include/mppi/utils/test_helper.h",
    "content": "//\n// Created by mgandhi3 on 1/13/20.\n//\n\n#ifndef MPPIGENERIC_KERNEL_TESTS_TEST_HELPER_H\n#define MPPIGENERIC_KERNEL_TESTS_TEST_HELPER_H\n#include <gtest/gtest.h>\n#include <Eigen/Dense>\n\n#include <array>\n#include <vector>\n\ninline void array_assert_float_eq(const std::vector<float>& known, const std::vector<float>& compute, int size)\n{\n  ASSERT_EQ(compute.size(), size) << \"The computed vector size is not the given size!\";\n  ASSERT_EQ(known.size(), compute.size()) << \"Two vectors are not the same size!\";\n  for (int i = 0; i < size; i++)\n  {\n    ASSERT_FLOAT_EQ(known[i], compute[i]) << \"Failed at index: \" << i;\n  }\n}\n\ninline void array_assert_float_eq(const float known, const std::vector<float>& compute, const int size)\n{\n  ASSERT_EQ(compute.size(), size) << \"The computed vector size is not the given size!\";\n  for (int i = 0; i < size; i++)\n  {\n    ASSERT_FLOAT_EQ(known, compute[i]) << \"Failed at index: \" << i;\n  }\n}\n\ntemplate <int size>\ninline void array_assert_float_eq(const std::array<float, size>& known, const std::array<float, size>& compute)\n{\n  ASSERT_EQ(compute.size(), size) << \"The computed array size is not the given size!\";\n  for (int i = 0; i < size; i++)\n  {\n    if (isnan(known[i]) || isnan(compute[i]))\n    {\n      ASSERT_EQ(isnan(known[i]), isnan(compute[i])) << \"NaN check failed at index: \" << i;\n    }\n    else if (isinf(known[i]) || isinf(compute[i]))\n    {\n      ASSERT_EQ(isinf(known[i]), isinf(compute[i])) << \"inf check failed at index: \" << i;\n    }\n    else\n    {\n      ASSERT_FLOAT_EQ(known[i], compute[i]) << \"Failed at index: \" << i;\n    }\n  }\n}\n\ntemplate <int size>\ninline void array_expect_float_eq(const std::array<float, size>& known, const std::array<float, size>& compute)\n{\n  ASSERT_EQ(compute.size(), size) << \"The computed array size is not the given size!\";\n  for (int i = 0; i < size; i++)\n  {\n    EXPECT_FLOAT_EQ(known[i], compute[i]) << \"Failed at 
index: \" << i;\n  }\n}\n\ntemplate <int size>\ninline void array_expect_near(const std::array<float, size>& known, const std::array<float, size>& compute, float tol)\n{\n  ASSERT_EQ(compute.size(), size) << \"The computed array size is not the given size!\";\n  for (int i = 0; i < size; i++)\n  {\n    EXPECT_NEAR(known[i], compute[i], tol) << \"Failed at index: \" << i;\n  }\n}\n\ntemplate <int size>\ninline void array_assert_float_eq(const float known, const std::array<float, size>& compute)\n{\n  ASSERT_EQ(compute.size(), size) << \"The computed array size is not the given size!\";\n  for (int i = 0; i < size; i++)\n  {\n    ASSERT_FLOAT_EQ(known, compute[i]) << \"Failed at index: \" << i;\n  }\n}\n\ntemplate <class EIGEN_MAT>\ninline void eigen_assert_float_eq(const Eigen::Ref<const EIGEN_MAT>& known, const Eigen::Ref<const EIGEN_MAT>& compute)\n{\n  for (int i = 0; i < known.size(); i++)\n  {\n    ASSERT_FLOAT_EQ(known.data()[i], compute.data()[i]) << \"Failed at index: \" << i;\n  }\n}\n\ntemplate <class EIGEN_MAT>\ninline void eigen_assert_float_near(const Eigen::Ref<const EIGEN_MAT>& known,\n                                    const Eigen::Ref<const EIGEN_MAT>& compute, float tol)\n{\n  for (int i = 0; i < known.size(); i++)\n  {\n    ASSERT_NEAR(known.data()[i], compute.data()[i], tol * known.data()[i]) << \"Failed at index: \" << i;\n  }\n}\n\ntemplate <int size>\ninline void array_assert_float_near(const std::array<float, size>& known, const std::array<float, size>& compute,\n                                    float tol)\n{\n  ASSERT_EQ(compute.size(), size) << \"The computed array size is not the given size!\";\n  for (int i = 0; i < size; i++)\n  {\n    ASSERT_NEAR(known[i], compute[i], tol) << \"Failed at index: \" << i;\n  }\n}\n\n#endif  // MPPIGENERIC_KERNEL_TESTS_TEST_HELPER_H\n"
  },
  {
    "path": "include/mppi/utils/texture_helpers/texture_helper.cu",
    "content": "#include \"texture_helper.cuh\"\n\ntemplate <class TEX_T, class DATA_T>\nTextureHelper<TEX_T, DATA_T>::TextureHelper(int number, cudaStream_t stream) : Managed(stream), size_(number)\n{\n  textures_.resize(number);\n  textures_buffer_.resize(number);\n  cpu_values_.resize(number);\n  cpu_buffer_values_.resize(number);\n  textures_d_ = textures_.data();\n}\n\ntemplate <class TEX_T, class DATA_T>\nTextureHelper<TEX_T, DATA_T>::~TextureHelper()\n{\n  freeCudaMem();\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::GPUSetup()\n{\n  if (!GPUMemStatus_)\n  {\n    TEX_T* derived = static_cast<TEX_T*>(this);\n    ptr_d_ = Managed::GPUSetup<TEX_T>(derived);\n    // allocates memory to access params on the GPU by pointer\n    HANDLE_ERROR(cudaMalloc(&params_d_, sizeof(TextureParams<DATA_T>) * textures_.size()));\n    HANDLE_ERROR(cudaMemcpyAsync(&(ptr_d_->textures_d_), &(params_d_), sizeof(TextureParams<DATA_T>*),\n                                 cudaMemcpyHostToDevice, this->stream_));\n    copyToDevice(true);\n  }\n  else\n  {\n    std::cout << \"GPU Memory already set\" << std::endl;\n  }\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::freeCudaMem()\n{\n  if (this->GPUMemStatus_)\n  {\n    for (int index = 0; index < textures_.size(); index++)\n    {\n      freeCudaMem(textures_[index]);\n    }\n    if (params_d_ != nullptr)\n    {\n      HANDLE_ERROR(cudaFree(params_d_));\n    }\n    if (ptr_d_ != nullptr)\n    {\n      HANDLE_ERROR(cudaFree(ptr_d_));\n    }\n  }\n  this->GPUMemStatus_ = false;\n  this->params_d_ = nullptr;\n  this->ptr_d_ = nullptr;\n  CudaCheckError();\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::freeCudaMem(TextureParams<DATA_T>& texture)\n{\n  if (texture.allocated)\n  {\n    HANDLE_ERROR(cudaFreeArray(texture.array_d));\n    HANDLE_ERROR(cudaDestroyTextureObject(texture.tex_d));\n    texture.allocated = false;\n    texture.array_d = 
nullptr;\n    texture.tex_d = 0;\n  }\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::allocateCudaTexture(int index)\n{\n  // if already allocated, deallocate\n  if (this->GPUMemStatus_ && textures_[index].allocated)\n  {\n    freeCudaMem(textures_[index]);\n  }\n}\n\ntemplate <class TEX_T, class DATA_T>\n__host__ __device__ void TextureHelper<TEX_T, DATA_T>::bodyOffsetToWorldPose(const float3& offset,\n                                                                             const float3& body_pose,\n                                                                             const float3& rotation, float3& output)\n{\n  mppi::math::bodyOffsetToWorldPoseEuler(offset, body_pose, rotation, output);\n}\n\ntemplate <class TEX_T, class DATA_T>\n__host__ __device__ void TextureHelper<TEX_T, DATA_T>::worldPoseToMapPose(const int index, const float3& input,\n                                                                          float3& output)\n{\n  float3 diff = make_float3(input.x - textures_d_[index].origin.x, input.y - textures_d_[index].origin.y,\n                            input.z - textures_d_[index].origin.z);\n  float3* rotation_mat_ptr = textures_d_[index].rotations;\n  output.x = (rotation_mat_ptr[0].x * diff.x + rotation_mat_ptr[0].y * diff.y + rotation_mat_ptr[0].z * diff.z);\n  output.y = (rotation_mat_ptr[1].x * diff.x + rotation_mat_ptr[1].y * diff.y + rotation_mat_ptr[1].z * diff.z);\n  output.z = (rotation_mat_ptr[2].x * diff.x + rotation_mat_ptr[2].y * diff.y + rotation_mat_ptr[2].z * diff.z);\n}\n\ntemplate <class TEX_T, class DATA_T>\n__host__ __device__ void TextureHelper<TEX_T, DATA_T>::mapPoseToTexCoord(const int index, const float3& input,\n                                                                         float3& output)\n{\n  // printf(\"res %f %f %f extent %f %f %f\\n\", textures_d_[index].resolution.x, textures_d_[index].resolution.y,\n  // textures_d_[index].resolution.z, 
textures_d_[index].extent.width, textures_d_[index].extent.depth);\n  // from map frame to pixels [m] -> [px]\n  output.x = input.x / textures_d_[index].resolution.x;\n  output.y = input.y / textures_d_[index].resolution.y;\n  output.z = input.z / textures_d_[index].resolution.z;\n\n  // normalize pixel values\n  output.x /= textures_d_[index].extent.width;\n  output.y /= textures_d_[index].extent.height;\n  if (textures_d_[index].extent.depth != 0)\n  {\n    output.z /= textures_d_[index].extent.depth;\n  }\n}\n\ntemplate <class TEX_T, class DATA_T>\n__host__ __device__ void TextureHelper<TEX_T, DATA_T>::worldPoseToTexCoord(const int index, const float3& input,\n                                                                           float3& output)\n{\n  float3 map;\n  worldPoseToMapPose(index, input, map);\n  mapPoseToTexCoord(index, map, output);\n  // printf(\"world to map (%f, %f, %f) -> (%f, %f, %f) -> (%f, %f, %f)\\n\", input.x, input.y, input.z, map.x, map.y,\n  // map.z, output.x, output.y, output.z);\n}\n\ntemplate <class TEX_T, class DATA_T>\n__host__ __device__ void TextureHelper<TEX_T, DATA_T>::bodyOffsetWorldToTexCoord(const int index, const float3& offset,\n                                                                                 const float3& body_pose,\n                                                                                 const float3& rotation, float3& output)\n{\n  float3 offset_result;\n  bodyOffsetToWorldPose(offset, body_pose, rotation, offset_result);\n  worldPoseToTexCoord(index, offset_result, output);\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::copyToDevice(bool synchronize)\n{\n  // TODO lock the buffer\n  // copies the buffer to the CPU side version\n  for (int i = 0; i < textures_buffer_.size(); i++)\n  {\n    if (textures_buffer_[i].update_params)\n    {\n      // copy over params from buffer to object\n      textures_[i].origin = textures_buffer_[i].origin;\n      
textures_[i].rotations[0] = textures_buffer_[i].rotations[0];\n      textures_[i].rotations[1] = textures_buffer_[i].rotations[1];\n      textures_[i].rotations[2] = textures_buffer_[i].rotations[2];\n      textures_[i].resolution = textures_buffer_[i].resolution;\n      textures_[i].update_params = true;\n      textures_buffer_[i].update_params = false;\n    }\n    // copy the relevant things over from buffer\n    if (textures_buffer_[i].update_data)\n    {\n      // moves data from cpu buffer to cpu side\n      cpu_values_[i] = std::move(cpu_buffer_values_[i]);\n      // cpu_buffer_values are resized in the updateTexture method\n      textures_[i].update_data = true;\n      textures_buffer_[i].update_data = false;\n      textures_[i].use = textures_buffer_[i].use;\n    }\n    if (textures_buffer_[i].update_mem)\n    {\n      textures_[i].extent = textures_buffer_[i].extent;\n      textures_[i].texDesc = textures_buffer_[i].texDesc;\n      textures_[i].update_mem = true;\n      textures_buffer_[i].update_mem = false;\n    }\n  }\n  // TODO unlock buffer\n\n  if (!this->GPUMemStatus_)\n  {\n    return;\n  }\n\n  // goes through and checks what needs to be copied and does it\n  TEX_T* derived = static_cast<TEX_T*>(this);\n  for (int i = 0; i < textures_.size(); i++)\n  {\n    TextureParams<DATA_T>* param = &textures_[i];\n\n    // do the allocation and texture creation\n    if (param->update_mem)\n    {\n      derived->allocateCudaTexture(i);\n      derived->createCudaTexture(i, false);\n    }\n    // if allocated\n    if (param->allocated)\n    {\n      // if we have new parameter values copy it over\n      if (param->update_params)\n      {\n        derived->copyParamsToGPU(i, false);\n      }\n\n      // if we have updated data copy it over\n      if (param->update_data)\n      {\n        // copies data to the GPU\n        derived->copyDataToGPU(i, false);\n      }\n    }\n  }\n  if (synchronize)\n  {\n    cudaStreamSynchronize(this->stream_);\n  }\n}\n\ntemplate 
<class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::createCudaTexture(int index, bool sync)\n{\n  TextureParams<DATA_T>* cpu_param = &textures_[index];\n  cpu_param->resDesc.res.array.array = cpu_param->array_d;\n\n  HANDLE_ERROR(cudaCreateTextureObject(&(cpu_param->tex_d), &cpu_param->resDesc, &cpu_param->texDesc, NULL));\n\n  cpu_param->allocated = true;\n  cpu_param->update_mem = false;\n\n  copyParamsToGPU(index, sync);\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::addNewTexture(const cudaExtent& extent)\n{\n  // update the buffer not the actual textures\n  textures_buffer_.resize(textures_buffer_.size() + 1);\n  textures_.resize(textures_.size() + 1);\n  textures_buffer_.back().extent = extent;\n  textures_.back().extent = extent;\n  size_ = textures_.size();\n\n  if (this->GPUMemStatus_)\n  {\n    TEX_T* derived = static_cast<TEX_T*>(this);\n    int index = textures_.size() - 1;\n\n    // TODO resize the device side array that stores textures\n\n    derived->allocateCudaTexture(index);\n    derived->createCudaTexture(index);\n    textures_.back().allocated = true;\n  }\n}\n\ntemplate <class TEX_T, class DATA_T>\n__host__ __device__ DATA_T TextureHelper<TEX_T, DATA_T>::queryTextureAtWorldOffsetPose(const int index,\n                                                                                       const float3& input,\n                                                                                       const float3& offset,\n                                                                                       const float3& rotation)\n{\n  float3 tex_coords;\n  bodyOffsetWorldToTexCoord(index, offset, input, rotation, tex_coords);\n  TEX_T* derived = static_cast<TEX_T*>(this);\n  return derived->queryTexture(index, tex_coords);\n}\n\ntemplate <class TEX_T, class DATA_T>\n__host__ __device__ DATA_T TextureHelper<TEX_T, DATA_T>::queryTextureAtWorldPose(const int index, const float3& input)\n{\n  float3 
tex_coords;\n  worldPoseToTexCoord(index, input, tex_coords);\n  TEX_T* derived = static_cast<TEX_T*>(this);\n  return derived->queryTexture(index, tex_coords);\n}\n\ntemplate <class TEX_T, class DATA_T>\n__host__ __device__ DATA_T TextureHelper<TEX_T, DATA_T>::queryTextureAtMapPose(const int index, const float3& input)\n{\n  float3 tex_coords;\n  mapPoseToTexCoord(index, input, tex_coords);\n  TEX_T* derived = static_cast<TEX_T*>(this);\n  return derived->queryTexture(index, tex_coords);\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::updateOrigin(int index, float3 new_origin)\n{\n  this->textures_buffer_[index].origin = new_origin;\n  this->textures_buffer_[index].update_params = true;\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::updateRotation(int index, std::array<float3, 3>& new_rotation)\n{\n  this->textures_buffer_[index].rotations[0] = new_rotation[0];\n  this->textures_buffer_[index].rotations[1] = new_rotation[1];\n  this->textures_buffer_[index].rotations[2] = new_rotation[2];\n  this->textures_buffer_[index].update_params = true;\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::updateResolution(int index, float resolution)\n{\n  this->textures_buffer_[index].resolution.x = resolution;\n  this->textures_buffer_[index].resolution.y = resolution;\n  this->textures_buffer_[index].resolution.z = resolution;\n  this->textures_buffer_[index].update_params = true;\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::updateResolution(int index, float3 resolution)\n{\n  this->textures_buffer_[index].resolution.x = resolution.x;\n  this->textures_buffer_[index].resolution.y = resolution.y;\n  this->textures_buffer_[index].resolution.z = resolution.z;\n  this->textures_buffer_[index].update_params = true;\n}\n\ntemplate <class TEX_T, class DATA_T>\nbool TextureHelper<TEX_T, DATA_T>::setExtent(int index, cudaExtent& extent)\n{\n  // checks if the 
extent has changed and reallocates if yes\n  TextureParams<DATA_T>* param = &textures_buffer_[index];\n  if (param->extent.width != extent.width || param->extent.height != extent.height ||\n      param->extent.depth != extent.depth)\n  {\n    // flag to update mem next time we should\n    param->update_mem = true;\n    this->textures_buffer_[index].extent = extent;\n    return true;\n  }\n  return false;\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::copyParamsToGPU(int index, bool sync)\n{\n  TextureParams<DATA_T>* cpu_param = &textures_[index];\n\n  // Copy entire param structure over from CPU to GPU\n  HANDLE_ERROR(cudaMemcpyAsync(&(params_d_[index]), cpu_param, sizeof(TextureParams<DATA_T>), cudaMemcpyHostToDevice,\n                               this->stream_));\n  cpu_param->update_params = false;\n  if (sync)\n  {\n    cudaStreamSynchronize(this->stream_);\n  }\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::updateAddressMode(int index, cudaTextureAddressMode mode)\n{\n  this->textures_buffer_[index].texDesc.addressMode[0] = mode;\n  this->textures_buffer_[index].texDesc.addressMode[1] = mode;\n  this->textures_buffer_[index].texDesc.addressMode[2] = mode;\n  this->textures_buffer_[index].update_mem = true;\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::updateAddressMode(int index, int layer, cudaTextureAddressMode mode)\n{\n  this->textures_buffer_[index].texDesc.addressMode[layer] = mode;\n  this->textures_buffer_[index].update_mem = true;\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::updateBorder(int index, float value)\n{\n  this->textures_buffer_[index].texDesc.borderColor[0] = value;\n  this->textures_buffer_[index].texDesc.borderColor[1] = value;\n  this->textures_buffer_[index].texDesc.borderColor[2] = value;\n  this->textures_buffer_[index].texDesc.borderColor[3] = value;\n  this->textures_buffer_[index].update_mem = 
true;\n}\n\ntemplate <class TEX_T, class DATA_T>\nvoid TextureHelper<TEX_T, DATA_T>::updateBorder(int index, int layer, float value)\n{\n  this->textures_buffer_[index].texDesc.borderColor[layer] = value;\n  this->textures_buffer_[index].update_mem = true;\n}\n"
  },
  {
    "path": "include/mppi/utils/texture_helpers/texture_helper.cuh",
    "content": "//\n// Created by jason on 1/5/22.\n//\n\n#ifndef MPPIGENERIC_TEXTURE_HELPER_CUH\n#define MPPIGENERIC_TEXTURE_HELPER_CUH\n\n#include <mppi/utils/managed.cuh>\n#include <mppi/utils/cuda_math_utils.cuh>\n#include <mppi/utils/math_utils.h>\n\n#include <array>\n\ntemplate <class DATA_T>\nstruct TextureParams\n{\n  cudaExtent extent;\n\n  cudaArray* array_d = nullptr;\n  cudaTextureObject_t tex_d = 0;\n  cudaChannelFormatDesc channelDesc;\n  cudaResourceDesc resDesc;\n  cudaTextureDesc texDesc;\n\n  float3 origin;\n  float3 rotations[3];\n  float3 resolution;\n\n  bool use = false;        // indicates that the texture is to be used or not, separate from allocation\n  bool allocated = false;  // indicates that the texture has been allocated on the GPU\n  bool update_data = false;\n  bool update_mem = false;  // indicates the GPU structure should be updated at the next convenient time\n  bool update_params = false;\n\n  TextureParams()\n  {\n    extent = make_cudaExtent(1, 1, 1);\n    resDesc.resType = cudaResourceTypeArray;\n    channelDesc = cudaCreateChannelDesc<DATA_T>();\n\n    // clamp\n    memset(&texDesc, 0, sizeof(texDesc));\n    texDesc.addressMode[0] = cudaAddressModeClamp;\n    texDesc.addressMode[1] = cudaAddressModeClamp;\n    texDesc.addressMode[2] = cudaAddressModeClamp;\n    texDesc.borderColor[0] = 0.0f;\n    texDesc.borderColor[1] = 0.0f;\n    texDesc.borderColor[2] = 0.0f;\n    texDesc.borderColor[3] = 0.0f;\n    texDesc.filterMode = cudaFilterModeLinear;\n    texDesc.readMode = cudaReadModeElementType;\n    texDesc.normalizedCoords = 1;\n\n    origin = make_float3(0.0, 0.0, 0.0);\n    rotations[0] = make_float3(1, 0, 0);\n    rotations[1] = make_float3(0, 1, 0);\n    rotations[2] = make_float3(0, 0, 1);\n    resolution = make_float3(1, 1, 1);\n  }\n};\n\ntemplate <class TEX_T, class DATA_T>\nclass TextureHelper : public Managed\n{\nprotected:\n  TextureHelper(int number, cudaStream_t stream = 0);\n\npublic:\n  virtual 
~TextureHelper();\n\n  void GPUSetup();\n\n  static void freeCudaMem(TextureParams<DATA_T>& texture);\n  virtual void freeCudaMem();\n\n  /**\n   * helper method to deallocate the index before allocating new ones\n   */\n  virtual void allocateCudaTexture(int index);\n  /**\n   * helper method to create a cuda texture\n   * @param index\n   */\n  virtual void createCudaTexture(int index, bool sync = true);\n\n  /**\n   * Copies texture information to the GPU version of the object\n   */\n  virtual void copyToDevice(bool synchronize = false);\n\n  /**\n   *\n   */\n  virtual void addNewTexture(const cudaExtent& extent);\n\n  __host__ __device__ void bodyOffsetToWorldPose(const float3& offset, const float3& body_pose, const float3& rotation,\n                                                 float3& output);\n  __host__ __device__ void worldPoseToMapPose(const int index, const float3& input, float3& output);\n  __host__ __device__ void mapPoseToTexCoord(const int index, const float3& input, float3& output);\n  __host__ __device__ void worldPoseToTexCoord(const int index, const float3& input, float3& output);\n  __host__ __device__ void bodyOffsetWorldToTexCoord(const int index, const float3& offset, const float3& body_pose,\n                                                     const float3& rotation, float3& output);\n  __host__ __device__ DATA_T queryTextureAtWorldOffsetPose(const int index, const float3& input, const float3& offset,\n                                                           const float3& rotation);\n  __host__ __device__ DATA_T queryTextureAtWorldPose(const int index, const float3& input);\n  __host__ __device__ DATA_T queryTextureAtMapPose(const int index, const float3& input);\n\n  virtual void updateOrigin(int index, float3 new_origin);\n  virtual void updateRotation(int index, std::array<float3, 3>& new_rotation);\n  virtual void updateResolution(int index, float resolution);\n  virtual void updateResolution(int index, float3 resolution);\n  
virtual bool setExtent(int index, cudaExtent& extent);\n  virtual void copyDataToGPU(int index, bool sync = false) = 0;\n  virtual void copyParamsToGPU(int index, bool sync = false);\n  virtual void enableTexture(int index)\n  {\n    this->textures_buffer_[index].update_params = true;\n    this->textures_buffer_[index].use = true;\n  }\n  virtual void disableTexture(int index)\n  {\n    this->textures_buffer_[index].update_params = true;\n    this->textures_buffer_[index].use = false;\n  }\n  __device__ __host__ bool checkTextureUse(int index) const\n  {\n    return this->textures_d_[index].use;\n  }\n\n  void updateAddressMode(int index, cudaTextureAddressMode mode);\n  void updateAddressMode(int index, int layer, cudaTextureAddressMode mode);\n\n  void updateBorder(int index, float value);\n  void updateBorder(int index, int layer, float value);\n\n  std::vector<TextureParams<DATA_T>> getTextures()\n  {\n    return textures_;\n  }\n  std::vector<TextureParams<DATA_T>> getBufferTextures()\n  {\n    return textures_buffer_;\n  }\n\n  std::vector<std::vector<DATA_T>> getCpuValues()\n  {\n    return cpu_values_;\n  }\n\n  std::vector<std::vector<DATA_T>> getCpuBufferValues()\n  {\n    return cpu_buffer_values_;\n  }\n\n  void updateDataAtIndex(int index)\n  {\n    this->textures_buffer_[index].update_data = true;\n  }\n\n  __device__ __host__ int size()\n  {\n    return size_;\n  }\n\n  TEX_T* ptr_d_ = nullptr;\n\n  __host__ __device__ float3 getOrigin(int index) const\n  {\n    return this->textures_d_[index].origin;\n  }\n\n  __host__ __device__ float3 getResolution(int index) const\n  {\n    return this->textures_d_[index].resolution;\n  }\n\n  __host__ __device__ cudaExtent getExtent(int index) const\n  {\n    return this->textures_d_[index].extent;\n  }\n\nprotected:\n  // std::mutex buffer_lck_;\n\n  // stores the values we actually use\n  std::vector<TextureParams<DATA_T>> textures_;\n  // buffer that can be updated async, does not have correct cuda memory 
locations\n  std::vector<TextureParams<DATA_T>> textures_buffer_;\n\n  // memory allocation can vary depending on 1D, 2D, or 3D implementation\n  std::vector<std::vector<DATA_T>> cpu_values_;\n  std::vector<std::vector<DATA_T>> cpu_buffer_values_;\n\n  // helper, on CPU points to vector data (textures_.data()), on GPU points to device copy (params_d_ variable)\n  TextureParams<DATA_T>* textures_d_ = nullptr;\n\n  // device pointer to the parameters malloced memory\n  TextureParams<DATA_T>* params_d_ = nullptr;\n  int size_ = 0;\n};\n\n#if __CUDACC__\n#include \"texture_helper.cu\"\n#endif\n\n#endif  // MPPIGENERIC_TEXTURE_HELPER_CUH\n"
  },
  {
    "path": "include/mppi/utils/texture_helpers/three_d_texture_helper.cu",
    "content": "#include \"three_d_texture_helper.cuh\"\n\ntemplate <class DATA_T>\nThreeDTextureHelper<DATA_T>::ThreeDTextureHelper(int number, bool synched, cudaStream_t stream)\n  : TextureHelper<ThreeDTextureHelper<DATA_T>, DATA_T>(number, stream)\n{\n  layer_copy_.resize(number);\n  this->synched_ = synched;\n  for (std::vector<bool>& layer : layer_copy_)\n  {\n    // sets all current indexes to be false\n    std::fill(layer.begin(), layer.end(), false);\n  }\n}\n\ntemplate <class DATA_T>\nvoid ThreeDTextureHelper<DATA_T>::allocateCudaTexture(int index)\n{\n  TextureHelper<ThreeDTextureHelper<DATA_T>, DATA_T>::allocateCudaTexture(index);\n\n  TextureParams<DATA_T>* param = &this->textures_[index];\n\n  // TODO check to make sure our alloc is correct, i.e. extent is valid\n  HANDLE_ERROR(cudaMalloc3DArray(&(param->array_d), &(param->channelDesc), param->extent));\n}\n\ntemplate <class DATA_T>\nvoid ThreeDTextureHelper<DATA_T>::updateTexture(const int index, const int z_index, std::vector<DATA_T>& values,\n                                                bool column_major)\n{\n  TextureParams<DATA_T>* param = &this->textures_buffer_[index];\n  int w = param->extent.width;\n  int h = param->extent.height;\n  int d = param->extent.depth;\n\n  // check that the sizes are correct\n  if (values.size() != w * h)\n  {\n    throw std::runtime_error(std::string(\"Error: invalid size to updateTexture \") + std::to_string(values.size()) +\n                             \" != \" + std::to_string(w * h));\n  }\n\n  // TODO needs to be in the data format used for textures\n\n  if (this->cpu_buffer_values_[index].size() != w * h * d)\n  {\n    this->cpu_buffer_values_[index].resize(w * h * d);\n    // copies values back to the buffer if it has been recently moved\n    std::copy(this->cpu_values_[index].begin(), this->cpu_values_[index].end(),\n              this->cpu_buffer_values_[index].begin());\n  }\n  // copy over values to cpu side holder\n  if (column_major)\n  {\n    for 
(int j = 0; j < w; j++)\n    {\n      for (int i = 0; i < h; i++)\n      {\n        int columnMajorIndex = j * h + i;\n        int rowMajorIndex = (w * h * z_index) + i * w + j;\n        this->cpu_buffer_values_[index][rowMajorIndex] = values[columnMajorIndex];\n      }\n    }\n  }\n  else\n  {\n    auto start = this->cpu_buffer_values_[index].begin() + (w * h * z_index);\n    std::copy(values.begin(), values.end(), start);\n  }\n  // tells the object to copy it over next time that happens\n  layer_copy_[index][z_index] = true;\n  if (!synched_)\n  {\n    param->update_data = true;\n  }\n}\n\ntemplate <class DATA_T>\nvoid ThreeDTextureHelper<DATA_T>::updateTexture(\n    const int index, const int z_index,\n    const Eigen::Ref<const Eigen::Matrix<DATA_T, Eigen::Dynamic, Eigen::Dynamic>, 0,\n                     Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>>\n        values,\n    bool column_major)\n{\n  TextureParams<DATA_T>* param = &this->textures_buffer_[index];\n  int w = param->extent.width;\n  int h = param->extent.height;\n  int d = param->extent.depth;\n\n  // check that the sizes are correct\n  if (values.size() != w * h)\n  {\n    throw std::runtime_error(std::string(\"Error: invalid size to updateTexture \") + std::to_string(values.size()) +\n                             \" != \" + std::to_string(w * h));\n  }\n\n  if (this->cpu_buffer_values_[index].size() != w * h * d)\n  {\n    this->cpu_buffer_values_[index].resize(w * h * d);\n    // copies values back to the buffer if it has been recently moved\n    std::copy(this->cpu_values_[index].begin(), this->cpu_values_[index].end(),\n              this->cpu_buffer_values_[index].begin());\n  }\n\n  // copy over values to cpu side holder\n  if (column_major)\n  {\n    for (int j = 0; j < w; j++)\n    {\n      for (int i = 0; i < h; i++)\n      {\n        int columnMajorIndex = j * h + i;\n        int rowMajorIndex = (w * h * z_index) + i * w + j;\n        this->cpu_buffer_values_[index][rowMajorIndex] = 
values.data()[columnMajorIndex];\n      }\n    }\n  }\n  else\n  {\n    auto start = this->cpu_buffer_values_[index].data() + (w * h * z_index);\n    memcpy(start, values.data(), values.size() * sizeof(DATA_T));\n  }\n  // tells the object to copy it over next time that happens\n  layer_copy_[index][z_index] = true;\n  if (!synched_)\n  {\n    param->update_data = true;\n  }\n}\n\n// TODO update texture where everything is copied over in one go\n\ntemplate <class DATA_T>\n__host__ __device__ DATA_T ThreeDTextureHelper<DATA_T>::queryTexture(const int index, const float3& point)\n{\n#ifdef __CUDA_ARCH__\n  return tex3D<DATA_T>(this->textures_d_[index].tex_d, point.x, point.y, point.z);\n#else\n  TextureParams<DATA_T>* param = &this->textures_[index];\n  float3 query =\n      make_float3(point.x * param->extent.width, point.y * param->extent.height, point.z * param->extent.depth);\n  query.x = query.x - 0.5f;\n  query.y = query.y - 0.5f;\n  query.z = query.z - 0.5f;\n  if (param->texDesc.addressMode[0] == cudaAddressModeClamp)\n  {\n    if (query.x > param->extent.width - 1)\n    {\n      query.x = param->extent.width - 1;\n    }\n    else if (query.x <= 0.0f)\n    {\n      query.x = 0.0;\n    }\n  }\n  else if (param->texDesc.addressMode[0] == cudaAddressModeBorder)\n  {\n    if (query.x > param->extent.width - 1 || query.x <= 0.0)\n    {\n      return createPartialCudaTuple<DATA_T>(param->texDesc.borderColor[0], param->texDesc.borderColor[1],\n                                            param->texDesc.borderColor[2], param->texDesc.borderColor[3]);\n    }\n  }\n  else\n  {\n    throw std::runtime_error(std::string(\"using unsupported address mode on the CPU in texture utils\"));\n  }\n  if (param->texDesc.addressMode[1] == cudaAddressModeClamp)\n  {\n    if (query.y > param->extent.height - 1)\n    {\n      query.y = param->extent.height - 1;\n    }\n    else if (query.y <= 0.0f)\n    {\n      query.y = 0.0;\n    }\n  }\n  else if (param->texDesc.addressMode[1] == 
cudaAddressModeBorder)\n  {\n    if (query.y > param->extent.height - 1 || query.y <= 0.0)\n    {\n      return createPartialCudaTuple<DATA_T>(param->texDesc.borderColor[0], param->texDesc.borderColor[1],\n                                            param->texDesc.borderColor[2], param->texDesc.borderColor[3]);\n    }\n  }\n  else\n  {\n    throw std::runtime_error(std::string(\"using unsupported address mode on the CPU in texture utils\"));\n  }\n  if (param->texDesc.addressMode[2] == cudaAddressModeClamp)\n  {\n    if (query.z > param->extent.depth - 1)\n    {\n      query.z = param->extent.depth - 1;\n    }\n    else if (query.z <= 0.0f)\n    {\n      query.z = 0.0;\n    }\n  }\n  else if (param->texDesc.addressMode[2] == cudaAddressModeBorder)\n  {\n    if (query.z > param->extent.depth - 1 || query.z <= 0.0)\n    {\n      return createPartialCudaTuple<DATA_T>(param->texDesc.borderColor[0], param->texDesc.borderColor[1],\n                                            param->texDesc.borderColor[2], param->texDesc.borderColor[3]);\n    }\n  }\n  else if (param->texDesc.addressMode[2] == cudaAddressModeWrap)\n  {\n    while (query.z > param->extent.depth - 1)\n    {\n      query.z -= param->extent.depth - 1;\n    }\n    while (query.z < 0.0f)\n    {\n      query.z += param->extent.depth - 1;\n    }\n  }\n  else\n  {\n    throw std::runtime_error(std::string(\"using unsupported address mode on the CPU in texture utils\"));\n  }\n\n  const int w = param->extent.width;\n  const int h = param->extent.height;\n  if (param->texDesc.filterMode == cudaFilterModeLinear)\n  {\n    // the value is distributed evenly in the space starting at half a cell from 0.0\n    int x_min = std::min((int)std::floor(query.x), w - 2);\n    int x_max = x_min + 1;\n    int y_min = std::min((int)std::floor(query.y), h - 2);\n    int y_max = y_min + 1;\n    int z_min = std::min((int)std::floor(query.z), (int)param->extent.depth - 2);\n    int z_max = z_min + 1;\n\n    float x_d = (query.x - 
x_min) / (x_max - x_min);\n    float y_d = (query.y - y_min) / (y_max - y_min);\n    float z_d = (query.z - z_min) / (z_max - z_min);\n\n    /**\n     * does trilinear interpolation https://en.wikipedia.org/wiki/Trilinear_interpolation\n     */\n\n    // Query corners of a cube\n    DATA_T c_000 = this->cpu_values_[index][(z_min * h + y_min) * w + x_min];\n    DATA_T c_100 = this->cpu_values_[index][(z_min * h + y_min) * w + x_max];\n    DATA_T c_010 = this->cpu_values_[index][(z_min * h + y_max) * w + x_min];\n    DATA_T c_001 = this->cpu_values_[index][(z_max * h + y_min) * w + x_min];\n    DATA_T c_110 = this->cpu_values_[index][(z_min * h + y_max) * w + x_max];\n    DATA_T c_101 = this->cpu_values_[index][(z_max * h + y_min) * w + x_max];\n    DATA_T c_011 = this->cpu_values_[index][(z_max * h + y_max) * w + x_min];\n    DATA_T c_111 = this->cpu_values_[index][(z_max * h + y_max) * w + x_max];\n\n    // interpolate along x to make a square\n    DATA_T c_00 = c_000 * (1 - x_d) + c_100 * x_d;\n    DATA_T c_01 = c_001 * (1 - x_d) + c_101 * x_d;\n    DATA_T c_10 = c_010 * (1 - x_d) + c_110 * x_d;\n    DATA_T c_11 = c_011 * (1 - x_d) + c_111 * x_d;\n\n    // inperpolate along y to make a line\n    DATA_T c_0 = c_00 * (1 - y_d) + c_10 * y_d;\n    DATA_T c_1 = c_01 * (1 - y_d) + c_11 * y_d;\n\n    // interpolate along z to get the point\n    DATA_T result = c_0 * (1 - z_d) + c_1 * z_d;\n\n    // does the actual interpolation\n    return result;\n  }\n  else if (param->texDesc.filterMode == cudaFilterModePoint)\n  {\n    int rowMajorIndex = (std::round(query.z) * h + std::round(query.y)) * w + std::round(query.x);\n    return this->cpu_values_[index][rowMajorIndex];\n  }\n  else\n  {\n    throw std::runtime_error(std::string(\"using unsupported filter mode on the CPU in texture utils\"));\n  }\n#endif\n}\n\ntemplate <class DATA_T>\nbool ThreeDTextureHelper<DATA_T>::setExtent(int index, cudaExtent& extent)\n{\n  if (extent.depth == 0)\n  {\n    throw 
std::runtime_error(std::string(\"Error: extent in setExtent invalid,\"\n                                         \" cannot use depth == 0 in 3D texture: using \") +\n                             std::to_string(extent.depth));\n  }\n\n  if (!TextureHelper<ThreeDTextureHelper<DATA_T>, DATA_T>::setExtent(index, extent))\n  {\n    return false;\n  }\n\n  this->cpu_buffer_values_[index].resize(extent.width * extent.height * extent.depth);\n  this->cpu_values_[index].resize(extent.width * extent.height * extent.depth);\n\n  // TODO recopy better when depth changes if possible\n\n  // this means we have changed our extent so we need to copy over all the data layers again\n  for (std::vector<bool>& layer : layer_copy_)\n  {\n    // resizes the array to account for change in depth\n    layer.resize(extent.depth);\n    // sets all current indexes to be true\n    std::fill(layer.begin(), layer.end(), true);\n  }\n\n  return true;\n}\n\ntemplate <class DATA_T>\nvoid ThreeDTextureHelper<DATA_T>::copyDataToGPU(int index, bool sync)\n{\n  TextureParams<DATA_T>* param = &this->textures_[index];\n  auto copy_vec = layer_copy_[index].begin();\n\n  int w = param->extent.width;\n  int h = param->extent.height;\n  int d = param->extent.depth;\n\n  cudaMemcpy3DParms params = { 0 };\n  params.srcPtr = make_cudaPitchedPtr(this->cpu_values_[index].data(), w * sizeof(DATA_T), w, h);\n  params.dstArray = param->array_d;\n  params.kind = cudaMemcpyHostToDevice;\n  params.dstPos = make_cudaPos(0, 0, 0);\n  params.srcPos = make_cudaPos(0, 0, 0);\n\n  // TODO check if we just need to copy it all and do that\n\n  // current index of z we are looking at\n  int cur_z_index = 0;\n  int prev_z_pos = -1;\n\n  // TODO since we cannot have an extent of zero depth this is fine\n  while (cur_z_index + 1 < d)\n  {\n    if (*(copy_vec + 1) and *copy_vec)\n    {\n      // if next index is true and cur is true keep building up copy\n      copy_vec++;\n      cur_z_index++;\n      continue;\n    }\n    else if 
(!*(copy_vec + 1) and *copy_vec)\n    {\n      // if the next one is false and current is true begin a copy\n      params.extent = make_cudaExtent(w, h, cur_z_index - prev_z_pos);\n\n      HANDLE_ERROR(cudaMemcpy3DAsync(&params, this->stream_));\n      prev_z_pos = cur_z_index;\n    }\n    else if (*(copy_vec + 1) and !*copy_vec)\n    {\n      // if the next one is true and cur is false start building up copy\n      params.dstPos = make_cudaPos(0, 0, cur_z_index + 1);\n      params.srcPos = make_cudaPos(0, 0, cur_z_index + 1);\n\n      prev_z_pos = cur_z_index;\n    }\n\n    // increment counters\n    copy_vec++;\n    cur_z_index++;\n  }\n\n  // execute whatever copy is left\n  if (prev_z_pos + 1 != cur_z_index)\n  {\n    params.extent = make_cudaExtent(w, h, cur_z_index - prev_z_pos);\n    HANDLE_ERROR(cudaMemcpy3DAsync(&params, this->stream_));\n  }\n\n  for (std::vector<bool>& layer : layer_copy_)\n  {\n    std::fill(layer.begin(), layer.end(), false);\n  }\n\n  if (sync)\n  {\n    cudaStreamSynchronize(this->stream_);\n  }\n\n  param->update_data = false;\n}\n"
  },
  {
    "path": "include/mppi/utils/texture_helpers/three_d_texture_helper.cuh",
    "content": "//\n// Created by jason on 1/10/22.\n//\n\n#ifndef MPPIGENERIC_THREE_D_TEXTURE_HELPER_CUH\n#define MPPIGENERIC_THREE_D_TEXTURE_HELPER_CUH\n\n#include <mppi/utils/texture_helpers/texture_helper.cuh>\n\n// TODO needs to 3D wrap around angles, i.e set 3D to wrap\n\ntemplate <class DATA_T>\nclass ThreeDTextureHelper : public TextureHelper<ThreeDTextureHelper<DATA_T>, DATA_T>\n{\npublic:\n  ThreeDTextureHelper(int number, bool synced = false, cudaStream_t stream = 0);\n\n  void allocateCudaTexture(int index) override;\n\n  void updateTexture(const int index, const int z_index, std::vector<DATA_T>& data, bool column_major = false);\n  void updateTexture(const int index, const int z_index,\n                     const Eigen::Ref<const Eigen::Matrix<DATA_T, Eigen::Dynamic, Eigen::Dynamic>, 0,\n                                      Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>>\n                         values,\n                     bool column_major = true);\n  bool setExtent(int index, cudaExtent& extent) override;\n  void copyDataToGPU(int index, bool sync = false) override;\n\n  std::vector<std::vector<bool>> getLayerCopy()\n  {\n    return layer_copy_;\n  }\n\n  __host__ __device__ DATA_T queryTexture(const int index, const float3& point);\n\nprotected:\n  // cpu values\n  // layer -> 3D array -> actual texture lookup thing\n  std::vector<std::vector<bool>> layer_copy_;  // indicator what 2D part of the 3D array needs to be copied over\n\n  // if we should require every depth to be updated before sending to GPU\n  bool synched_ = false;\n};\n\n#if __CUDACC__\n#include \"three_d_texture_helper.cu\"\n#endif\n\n#endif  // MPPIGENERIC_THREE_D_TEXTURE_HELPER_CUH\n"
  },
  {
    "path": "include/mppi/utils/texture_helpers/two_d_texture_helper.cu",
    "content": "//\n// Created by jason on 1/5/22.\n//\n\n#include \"two_d_texture_helper.cuh\"\n\ntemplate <class DATA_T>\nvoid TwoDTextureHelper<DATA_T>::allocateCudaTexture(int index)\n{\n  TextureHelper<TwoDTextureHelper<DATA_T>, DATA_T>::allocateCudaTexture(index);\n\n  TextureParams<DATA_T>* param = &this->textures_[index];\n\n  int w = param->extent.width;\n  int h = param->extent.height;\n\n  HANDLE_ERROR(cudaMallocArray(&(param->array_d), &(param->channelDesc), w, h));\n}\n\ntemplate <class DATA_T>\nvoid TwoDTextureHelper<DATA_T>::updateTexture(const int index, std::vector<DATA_T>& values, bool column_major)\n{\n  TextureParams<DATA_T>* param = &this->textures_buffer_[index];\n  int w = param->extent.width;\n  int h = param->extent.height;\n\n  // check that the sizes are correct\n  if (values.size() != w * h)\n  {\n    throw std::runtime_error(std::string(\"Error: invalid size to updateTexture \") + std::to_string(values.size()) +\n                             \" != \" + std::to_string(w * h));\n  }\n\n  // copy over values to cpu side holder\n  this->cpu_buffer_values_[index].resize(w * h);\n  if (column_major)\n  {\n    for (int j = 0; j < w; j++)\n    {\n      for (int i = 0; i < h; i++)\n      {\n        int columnMajorIndex = j * h + i;\n        int rowMajorIndex = i * w + j;\n        this->cpu_buffer_values_[index][rowMajorIndex] = values[columnMajorIndex];\n      }\n    }\n  }\n  else\n  {\n    // std::copy(values.begin(), values.end(), cpu_buffer_values_[index].begin());\n    this->cpu_buffer_values_[index] = std::move(values);\n  }\n  // tells the object to copy it over next time that happens\n  param->update_data = true;\n}\n\ntemplate <class DATA_T>\nvoid TwoDTextureHelper<DATA_T>::updateTexture(const int index, std::vector<DATA_T>& data, cudaExtent& extent,\n                                              bool column_major)\n{\n  setExtent(index, extent);\n  updateTexture(index, data, column_major);\n}\n\ntemplate <class DATA_T>\n__host__ 
__device__ DATA_T TwoDTextureHelper<DATA_T>::queryTexture(const int index, const float3& point)\n{\n#ifdef __CUDA_ARCH__\n  return tex2D<DATA_T>(this->textures_d_[index].tex_d, point.x, point.y);\n#else\n  return queryTextureCPU(index, point);\n#endif\n}\n\ntemplate <class DATA_T>\nbool TwoDTextureHelper<DATA_T>::setExtent(int index, cudaExtent& extent)\n{\n  if (extent.depth != 0)\n  {\n    throw std::runtime_error(std::string(\"Error: extent in setExtent invalid,\"\n                                         \" cannot use depth != 0 in 2D texture: using \") +\n                             std::to_string(extent.depth));\n  }\n\n  return TextureHelper<TwoDTextureHelper<DATA_T>, DATA_T>::setExtent(index, extent);\n}\n\ntemplate <class DATA_T>\nvoid TwoDTextureHelper<DATA_T>::copyDataToGPU(int index, bool sync)\n{\n  TextureParams<DATA_T>* param = &this->textures_[index];\n  int w = param->extent.width;\n  int h = param->extent.height;\n  HANDLE_ERROR(cudaMemcpy2DToArrayAsync(param->array_d, 0, 0, this->cpu_values_[index].data(), w * sizeof(DATA_T),\n                                        w * sizeof(DATA_T), h, cudaMemcpyHostToDevice, this->stream_));\n  if (sync)\n  {\n    cudaStreamSynchronize(this->stream_);\n  }\n  param->update_data = false;\n}\n\ntemplate <class DATA_T>\nvoid TwoDTextureHelper<DATA_T>::updateTexture(\n    const int index,\n    const Eigen::Ref<const Eigen::Matrix<DATA_T, Eigen::Dynamic, Eigen::Dynamic>, 0,\n                     Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>>\n        values,\n    bool column_major)\n{\n  TextureParams<DATA_T>* param = &this->textures_buffer_[index];\n  int w = param->extent.width;\n  int h = param->extent.height;\n  this->cpu_buffer_values_[index].resize(w * h);\n\n  if (column_major)\n  {\n    // if we are column major transform to row major\n    for (int j = 0; j < w; j++)\n    {\n      for (int i = 0; i < h; i++)\n      {\n        int columnMajorIndex = j * h + i;\n        int rowMajorIndex = i * w + j;\n      
  this->cpu_buffer_values_[index][rowMajorIndex] = values.data()[columnMajorIndex];\n      }\n    }\n  }\n  else\n  {\n    // if we row major copy directly\n    memcpy(this->cpu_buffer_values_[index].data(), values.data(), values.size() * sizeof(DATA_T));\n  }\n  // tells the object to copy it over next time that happens\n  param->update_data = true;\n}\n\ntemplate <class DATA_T>\nvoid TwoDTextureHelper<DATA_T>::updateTexture(\n    int index,\n    const Eigen::Ref<const Eigen::Matrix<DATA_T, Eigen::Dynamic, Eigen::Dynamic>, 0,\n                     Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>>\n        values,\n    cudaExtent& extent, bool column_major)\n{\n  setExtent(index, extent);\n  updateTexture(index, values, column_major);\n}\n\ntemplate <class DATA_T>\nDATA_T TwoDTextureHelper<DATA_T>::queryTextureCPU(const int index, const float3& point)\n{\n  TextureParams<DATA_T>* param = &this->textures_[index];\n\n  // convert normalized to array index\n  float2 query = make_float2(point.x * param->extent.width, point.y * param->extent.height);\n\n  // we subtract half a grid cell since the elevation map is the elevation at the center of the grid cell\n  query.x = query.x - 0.5f;\n  query.y = query.y - 0.5f;\n  // if (this->cpu_values_[index].size() < param->extent.width * param->extent.height)\n  // {\n  //   return DATA_T();\n  // }\n  if (param->texDesc.addressMode[0] == cudaAddressModeClamp)\n  {\n    if (query.x > param->extent.width - 1)\n    {\n      query.x = param->extent.width - 1;\n    }\n    else if (query.x <= 0.0)\n    {\n      query.x = 0.0;\n    }\n  }\n  else if (param->texDesc.addressMode[0] == cudaAddressModeBorder)\n  {\n    if (query.x > param->extent.width - 1 || query.x <= 0.0)\n    {\n      return createPartialCudaTuple<DATA_T>(param->texDesc.borderColor[0], param->texDesc.borderColor[1],\n                                            param->texDesc.borderColor[2], param->texDesc.borderColor[3]);\n    }\n  }\n  else\n  {\n    throw 
std::runtime_error(std::string(\"using unsupported address mode on the CPU in texture utils\"));\n  }\n  if (param->texDesc.addressMode[1] == cudaAddressModeClamp)\n  {\n    if (query.y > param->extent.height - 1)\n    {\n      query.y = param->extent.height - 1;\n    }\n    else if (query.y <= 0.0)\n    {\n      query.y = 0.0;\n    }\n  }\n  else if (param->texDesc.addressMode[1] == cudaAddressModeBorder)\n  {\n    if (query.y > param->extent.height - 1 || query.y <= 0.0)\n    {\n      return createPartialCudaTuple<DATA_T>(param->texDesc.borderColor[0], param->texDesc.borderColor[1],\n                                            param->texDesc.borderColor[2], param->texDesc.borderColor[3]);\n    }\n  }\n  else\n  {\n    throw std::runtime_error(std::string(\"using unsupported address mode on the CPU in texture utils\"));\n  }\n  int w = param->extent.width;\n  if (param->texDesc.filterMode == cudaFilterModeLinear)\n  {\n    // the value is distributed evenly in the space starting at half a cell from 0.0\n    int x_min = std::min((int)std::floor(query.x), w - 2);\n    int x_max = x_min + 1;\n    int y_min = std::min((int)std::floor(query.y), (int)param->extent.height - 2);\n    int y_max = y_min + 1;\n\n    // does bilinear interpolation https://en.wikipedia.org/wiki/Bilinear_interpolation\n\n    DATA_T Q_11 = this->cpu_values_[index][y_min * w + x_min];\n    DATA_T Q_12 = this->cpu_values_[index][y_min * w + x_max];\n    DATA_T Q_21 = this->cpu_values_[index][y_max * w + x_min];\n    DATA_T Q_22 = this->cpu_values_[index][y_max * w + x_max];\n\n    DATA_T y_min_interp = Q_11 * ((x_max - query.x) / (x_max - x_min)) + Q_12 * ((query.x - x_min) / (x_max - x_min));\n    DATA_T y_max_interp = Q_21 * ((x_max - query.x) / (x_max - x_min)) + Q_22 * ((query.x - x_min) / (x_max - x_min));\n\n    DATA_T result =\n        y_min_interp * ((y_max - query.y) / (y_max - y_min)) + y_max_interp * ((query.y - y_min) / (y_max - y_min));\n\n    // does the actual interpolation\n    
return result;\n  }\n  else if (param->texDesc.filterMode == cudaFilterModePoint)\n  {\n    int rowMajorIndex = std::round(query.y) * w + std::round(query.x);\n    return this->cpu_values_[index][rowMajorIndex];\n  }\n  else\n  {\n    throw std::runtime_error(std::string(\"using unsupported filter mode on the CPU in texture utils\"));\n  }\n}\n"
  },
  {
    "path": "include/mppi/utils/texture_helpers/two_d_texture_helper.cuh",
    "content": "//\n// Created by jason on 1/5/22.\n//\n\n#ifndef MPPIGENERIC_TWO_D_TEXTURE_HELPER_CUH\n#define MPPIGENERIC_TWO_D_TEXTURE_HELPER_CUH\n\n#include <mppi/utils/texture_helpers/texture_helper.cuh>\n\ntemplate <class DATA_T>\nclass TwoDTextureHelper : public TextureHelper<TwoDTextureHelper<DATA_T>, DATA_T>\n{\npublic:\n  TwoDTextureHelper<DATA_T>(int number, cudaStream_t stream = 0)\n    : TextureHelper<TwoDTextureHelper<DATA_T>, DATA_T>(number, stream)\n  {\n  }\n\n  void allocateCudaTexture(int index) override;\n\n  void updateTexture(int index,\n                     const Eigen::Ref<const Eigen::Matrix<DATA_T, Eigen::Dynamic, Eigen::Dynamic>, 0,\n                                      Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>>,\n                     cudaExtent& extent, bool column_major = true);\n  void updateTexture(const int index,\n                     const Eigen::Ref<const Eigen::Matrix<DATA_T, Eigen::Dynamic, Eigen::Dynamic>, 0,\n                                      Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>>,\n                     bool column_major = true);\n  void updateTexture(const int index, std::vector<DATA_T>& data, bool column_major = false);\n  void updateTexture(const int index, std::vector<DATA_T>& data, cudaExtent& extent, bool column_major = false);\n  bool setExtent(int index, cudaExtent& extent) override;\n  void copyDataToGPU(int index, bool sync = false);\n\n  __host__ __device__ DATA_T queryTexture(const int index, const float3& point);\n  DATA_T queryTextureCPU(const int index, const float3& point);\n\nprotected:\n};\n\n#if __CUDACC__\n#include \"two_d_texture_helper.cu\"\n#endif\n\n#endif  // MPPIGENERIC_TWO_D_TEXTURE_HELPER_CUH\n"
  },
  {
    "path": "include/mppi/utils/type_printing.h",
    "content": "/**\n * C++ 11 solution to print out variable type\n * Copied from:\n * https://stackoverflow.com/questions/81870/is-it-possible-to-print-a-variables-type-in-standard-c\n * Created by Bogdan on 1/12/2020\n */\n#ifndef UTILS_TYPE_PRINTING_H_\n#define UTILS_TYPE_PRINTING_H_\n\n#include <type_traits>\n#include <typeinfo>\n#ifndef _MSC_VER\n#include <cxxabi.h>\n#endif\n#include <memory>\n#include <string>\n#include <cstdlib>\n\n// tldr use TYPE(variable) to get std::string of the variable's type\n#ifndef TYPE\n#define TYPE(a) type_name<decltype(a)>()\n#endif\n\ntemplate <class T>\nstd::string type_name()\n{\n  typedef typename std::remove_reference<T>::type TR;\n  std::unique_ptr<char, void (*)(void*)> own(\n#ifndef _MSC_VER\n      abi::__cxa_demangle(typeid(TR).name(), nullptr, nullptr, nullptr),\n#else\n      nullptr,\n#endif\n      std::free);\n  std::string r = own != nullptr ? own.get() : typeid(TR).name();\n  if (std::is_const<TR>::value)\n    r += \" const\";\n  if (std::is_volatile<TR>::value)\n    r += \" volatile\";\n  if (std::is_lvalue_reference<T>::value)\n    r += \"&\";\n  else if (std::is_rvalue_reference<T>::value)\n    r += \"&&\";\n  return r;\n}\n\n#endif /* UTILS_TYPE_PRINTING_H_*/\n"
  },
  {
    "path": "include/mppi/version.h.in",
    "content": "/**\n * @file version.h\n * @brief Templated File for MPPI-Generic Version\n * @author Bogdan Vlahov\n * @version 0.0.1\n * @date 2026-04-14\n */\n#pragma once\n\n#define MPPI_GENERIC_VERSION @PROJECT_VERSION@\n#define MPPI_GENERIC_VERSION_STRING \"@PROJECT_VERSION@\"\n\n#define MPPI_GENERIC_VERSION_MAJOR @PROJECT_VERSION_MAJOR@\n#define MPPI_GENERIC_VERSION_MINOR @PROJECT_VERSION_MINOR@\n#define MPPI_GENERIC_VERSION_PATCH @PROJECT_VERSION_PATCH@\n"
  },
  {
    "path": "resources/PF_1000_cell_init.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:ec9c111b7d37c3074b4b4a19adf63585a1d5b333ddda4af216a562693ad492d1\nsize 657158\n"
  },
  {
    "path": "resources/PF_1000_hidden_init.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:5df4063baca04f0079b94e8c997047722e56d8f12a5de8229274acc6a1340b5d\nsize 657158\n"
  },
  {
    "path": "resources/PF_1000_lstm.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:375d75e3276e9fa020d0c33c4c7b2a8fc7af5ae93021942488ffc2234262b7f2\nsize 12078\n"
  },
  {
    "path": "resources/PF_1000_output.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:6b26b5ec02d2f88b314995819e624d52c0444764a804ac3f334ed74fe4155911\nsize 1042\n"
  },
  {
    "path": "resources/ackerman_test.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:6a7a4299fc0f03eff7e7caba21247f70f892f2b4705076ff485a9bea2064442f\nsize 14734540\n"
  },
  {
    "path": "resources/autorally_nnet_09_12_2018.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:fc67d5a365e513c65ecb5c38c93a2f227dade9ec5c092e3c63878f95e8865694\nsize 12722\n"
  },
  {
    "path": "resources/bicycle_slip_hybrid.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:bcd2fbb7262ef3b35637027c15ad7904b02e265ccb9ed8d1b54e73c884f8eb15\nsize 4512514\n"
  },
  {
    "path": "resources/bicycle_slip_hybrid_test.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:75f53e00e525d0b2faad589c6776e33e09f338ce69fbb06e46b624683580f0e9\nsize 4512514\n"
  },
  {
    "path": "resources/bicycle_slip_kinematic_test.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:594e9b4fa900438043b7019923b05a730e7c5add894a2e85c3f0a22c0876a3f3\nsize 26482688\n"
  },
  {
    "path": "resources/bicycle_slip_test.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:c3acbdeff79819386ccb0e02fd84845388a784ad34b8e6023a1dc0d7e1bff2fc\nsize 18634406\n"
  },
  {
    "path": "resources/body_loss_bicycle_slip_kinematic_2023_03_10-12_54_39.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:74dabe9f4d367a6112132efc36a2dbaf5d004da8fa0ef12a863108cf8ed578e3\nsize 4505014\n"
  },
  {
    "path": "resources/ccrf_track.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:546f8e88874311082a65fde8d3ebf707bab0f8369746d5ff9b7f725f2d9f1528\nsize 4321310\n"
  },
  {
    "path": "resources/cell_init.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:479e450b1dcca2791941b1d50ce285ce7345f5a7ffb5c1133f9bc52887e6ca08\nsize 132358\n"
  },
  {
    "path": "resources/hidden_init.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:93965332fd41d8e37d47dd79bbb4755f4a5a6f719a7a55165407722b230eb2d9\nsize 132358\n"
  },
  {
    "path": "resources/lstm.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:3135fec388eab4fefec41948a9f0cbff0d11c14657992f26350ed1e1013691f7\nsize 12078\n"
  },
  {
    "path": "resources/lstm_lstm_ackerman.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:3ecf222b541fe8767dd835792dc6f06e7878d58e1f216877c082898969ff5302\nsize 1021466\n"
  },
  {
    "path": "resources/lstm_lstm_bicycle_slip_kinematic.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:7de7bce16b34fb3f89cc1b53cbd391053eb757b476230b3055438b9510a58714\nsize 4505014\n"
  },
  {
    "path": "resources/lstm_lstm_steering.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:83aef869ea23e01cb0a33abcae179ad1baa83d24bc2c60665c59b17950f71e73\nsize 195760\n"
  },
  {
    "path": "resources/lstm_lstm_steering_accel.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:c65b093c690ea938e7ae6e49e64dba7f3b042b67027149a46a020d95852dc8a0\nsize 2489169\n"
  },
  {
    "path": "resources/lstm_lstm_test.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:3c0a405fc6db14efb3bc736cfc9930dd7bd66faf42f4d7f3941a0ccd185d8388\nsize 200797\n"
  },
  {
    "path": "resources/network_rde_2024_03_22-19_00_26.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:77a37ae6d963904c20892c9e4f5f4180f4b5641b0396ad66cc6273d41c474c35\nsize 18252124\n"
  },
  {
    "path": "resources/network_rde_test.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:e50bf425bcf93de7ba80b0d3cc49eb8ec8ae3bd6255c149efa8e2472614849c0\nsize 18250976\n"
  },
  {
    "path": "resources/output.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:87285415580e5dc351d3c315270750c88af9b158f93b24f4f1c0407cd77452d5\nsize 1042\n"
  },
  {
    "path": "resources/sim_PF_200_cell_init.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:dcd107ddbf3189ae984f05c0f40f88edfdbc834898840548e782e6698b59825a\nsize 132358\n"
  },
  {
    "path": "resources/sim_PF_200_hidden_init.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:fa86275cfccfec3dd7483fed63422b081f28f9e31f8db8acb5d0a978fb306f13\nsize 132358\n"
  },
  {
    "path": "resources/sim_PF_200_lstm.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:41952b66ccaf1084665ec69925d7a1ff6f6ddaec8423b556b194e1cde3b4c7d0\nsize 12078\n"
  },
  {
    "path": "resources/sim_PF_200_output.npz",
    "content": "version https://git-lfs.github.com/spec/v1\noid sha256:ae29fa9fc96be8ac44efb3451f7be332d52d2dffd3ea5a34fff97cc49e4669d0\nsize 1042\n"
  },
  {
    "path": "scripts/autorally/lstm_converter.py",
    "content": "# reads in an LSTM npz and saves it out so that we can read it with cnpy\nimport torch\nimport numpy as np\nimport pickle\nfrom torch import Tensor\nfrom torch.nn.parameter import Parameter\n\n#filename = \"/home/jason/Documents/research/MPPI-Generic-Model-Learning/Results/train/autorally/PF_1000_integrated_fixed_1/LSTM_1X15-1000X10_0.02-0_I/model_store/LSTM_1X15-1000X10_0.02-0_I_37_1.npz\"\nfilename = \"/home/jason/Documents/research/MPPI-Generic-Model-Learning/Results/train/autorally/PF_200_fixed/LSTM_1X15-200X10_0.02-0_I/LSTM_1X15-200X10_0.02-0_I.npz\"\n\n\nnetwork = np.load(open(filename, 'rb'), allow_pickle=True)\nhidden_init = network[\"hidden_init\"].item()\ncell_init = network[\"cell_init\"].item()\noutput = network[\"output\"].item()\nlstm = network[\"lstm\"].item()\n\nnp.savez(\"PF_200_hidden_init.npz\", **hidden_init)\nnp.savez(\"PF_200_cell_init.npz\", **cell_init)\nnp.savez(\"PF_200_output.npz\", **output)\nnp.savez(\"PF_200_lstm.npz\", **lstm)\n\ntest = np.load(open(\"/home/jason/catkin_ws/ar/src/MPPI-ROS/\"\n                    \"submodules/MPPI-Generic/resources/hidden_init.npz\", 'rb'), allow_pickle=True)\nprint(test)\nprint(test.keys())\nprint(test[\"dynamics_W1\"])\n\ntest = np.load(open(\"PF_1000_hidden_init.npz\", 'rb'), allow_pickle=True)\nprint(test)\nprint(test.keys())\nprint(test[\"dynamics_W1\"])\n\n#np.savez(filename, **pa)\n"
  },
  {
    "path": "scripts/autorally/test/generateTestMaps.py",
    "content": "#!/usr/bin/env python3\n\nimport numpy as np\nfrom PIL import Image\nimport math\nimport argparse\n\ndef genLoadTrackDataTestMap(args):\n\n    # used to check if the map is being loaded correctly\n    width = 10\n    height = 20\n    pixelsPerMeter = 2\n\n    channel0 = np.zeros((width*pixelsPerMeter, height*pixelsPerMeter), np.float32)\n    channel1 = np.zeros((width*pixelsPerMeter, height*pixelsPerMeter), np.float32)\n    channel2 = np.zeros((width*pixelsPerMeter, height*pixelsPerMeter), np.float32)\n    channel3 = np.zeros((width*pixelsPerMeter, height*pixelsPerMeter), np.float32)\n\n    counter = 0\n    for i in range(0, width*pixelsPerMeter):\n        for j in range(0, height*pixelsPerMeter):\n            # TODO this is actually flipped, but loaded into CUDA correctly\n            #print(\"putting width and height, \", pixelsPerMeter * height * i + j, \" \", counter)\n            counter+= 1\n            channel0[i, j] = counter\n            channel1[i, j] = counter * 10\n            channel2[i, j] = counter * 100\n            channel3[i, j] = counter * 1000\n    #print(channel0.flatten())\n\n    #Save data to numpy array, each channel is saved individually as an array in row major order.\n    track_dict = {\"xBounds\":np.array([-width/2, width/2], dtype = np.float32),\n                  \"yBounds\":np.array([-height/2, height/2], dtype = np.float32),\n                  \"pixelsPerMeter\":np.array([pixelsPerMeter], dtype=np.float32),\n                  \"channel0\":channel0.flatten(),\n                  \"channel1\":channel1.flatten(),\n                  \"channel2\":channel2.flatten(),\n                  \"channel3\":channel3.flatten()}\n\n\n    np.savez(args.output+\"track_map.npz\", **track_dict)\n\n    # used to check for valid outputs of the AutoRally standard cost function\n    # dims match the standard ones used to marietta track\n    width = 30\n    height = 30\n    pixelsPerMeter = 20\n\n    channel0 = np.zeros((width*pixelsPerMeter, 
height*pixelsPerMeter), np.float32)\n    channel1 = np.zeros((width*pixelsPerMeter, height*pixelsPerMeter), np.float32)\n    channel2 = np.zeros((width*pixelsPerMeter, height*pixelsPerMeter), np.float32)\n    channel3 = np.zeros((width*pixelsPerMeter, height*pixelsPerMeter), np.float32)\n\n    for i in range(0, width*pixelsPerMeter):\n        for j in range(0, height*pixelsPerMeter):\n            x = (j*1.0)/pixelsPerMeter\n            y = (i*1.0)/pixelsPerMeter\n            channel0[i, j] = abs(height/2.0 - y) + ((x)/(width))\n            #if ((x == 12.5 or x == 13.5) and y == 0):\n            #print(x,y, \" = \", channel0[i,j])\n            #if channel0[i, j] < 1.0:\n            #    print(\"putting width and height \",i , \" \", j, \" = \", channel0[i,j])\n    #print(channel0.flatten())\n\n    track_dict = {\"xBounds\":np.array([-13, 17], dtype = np.float32),\n                  \"yBounds\":np.array([-10, 20], dtype = np.float32),\n                  \"pixelsPerMeter\":np.array([pixelsPerMeter], dtype=np.float32),\n                  \"channel0\":channel0.flatten(),\n                  \"channel1\":channel1.flatten(),\n                  \"channel2\":channel2.flatten(),\n                  \"channel3\":channel3.flatten()}\n\n    np.savez(args.output+\"track_map_standard.npz\", **track_dict)\n\n    # used to check for valid outputs of the AutoRally robust cost function\n    # dims match the standard ones used to CCRF track\n    width = 70\n    height = 55\n    pixelsPerMeter = 20\n\n    channel0 = np.zeros((width*pixelsPerMeter, height*pixelsPerMeter), np.float32)\n    channel1 = np.zeros((width*pixelsPerMeter, height*pixelsPerMeter), np.float32)\n    channel2 = np.zeros((width*pixelsPerMeter, height*pixelsPerMeter), np.float32)\n    channel3 = np.zeros((width*pixelsPerMeter, height*pixelsPerMeter), np.float32)\n\n    for i in range(0, width*pixelsPerMeter):\n        for j in range(0, height*pixelsPerMeter):\n            x = (j*1.0)/pixelsPerMeter\n            y = 
(i*1.0)/pixelsPerMeter\n            channel1[i, j] = abs(height/2.0 - y) + ((x)/(width))\n            if x > 50 or x < 15:\n                channel0[i,j] = 1.0\n            elif x > 40 or x < 25:\n                channel0[i,j] = 0.6\n            channel2[i,j] = x\n            channel3[i,j] = math.atan2(y,x)\n            #print(\"putting x,y = \",x , \" \", y, \" = \", channel0[i,j], channel1[i,j], channel2[i,j], channel3[i,j])\n    #print(channel3.flatten())\n\n    track_dict = {\"xBounds\":np.array([-25, 45], dtype = np.float32),\n                  \"yBounds\":np.array([-50, 5], dtype = np.float32),\n                  \"pixelsPerMeter\":np.array([pixelsPerMeter], dtype=np.float32),\n                  \"channel0\":channel0.flatten(),\n                  \"channel1\":channel1.flatten(),\n                  \"channel2\":channel2.flatten(),\n                  \"channel3\":channel3.flatten()}\n\n    np.savez(args.output+\"track_map_robust.npz\", **track_dict)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Creates a npz map file for testing purposes\")\n    # parser.add_argument(\"-i\", \"--input\", type = str, help = \"Costmap in old .txt format\")\n    # parser.add_argument(\"-d\", \"--display\", type = str,\n    #                     help = \"Name of image to save costmap as\",\n    #                     default=\"display_image.jpg\")\n    parser.add_argument(\"-o\", \"--output\", type = str,\n                        help = \"File to save map to\",\n                        default = \"../../../resource/autorally/test/\")\n    args = parser.parse_args()\n    genLoadTrackDataTestMap(args)\n"
  },
  {
    "path": "scripts/autorally/test/generateTestNetwork.py",
    "content": "#!/usr/bin/env python3\n\nimport numpy as np\nfrom PIL import Image\nimport pickle\nimport argparse\n\ndef genLoadNetworkDataTest(args):\n    param_dict = {}\n    layer_counter = 0\n\n    net_structure = [6, 32, 32, 4]\n    params = 0\n    for i in range(1, len(net_structure)):\n        inc = net_structure[i-1] * net_structure[i]\n        print(\"param  = \", params, \" inc = \", inc)\n        param_dict[\"dynamics_W\" + str(i)] = np.arange(params, params + inc, dtype=np.float64)\n        params += inc\n        inc = net_structure[i]\n        print(\"param  = \", params, \" inc = \", inc)\n        param_dict[\"dynamics_b\" + str(i)] = np.arange(params, params + inc, dtype=np.float64)\n        params += inc\n    print(param_dict)\n\n    np.savez(args.output+\"/neuralNetLoadTest.npz\", **param_dict)\n\ndef genComputationNetworkTest(args):\n    param_dict = {}\n\n    net_structure = [4, 3, 4]\n    for i in range(1, len(net_structure)):\n        param_dict[\"dynamics_W\" + str(i)] = 1\n        param_dict[\"dynamics_b\" + str(i)] = 1\n    print(param_dict)\n\n    np.savez(args.output+\"/neuralNetComputeTest.npz\", **param_dict)\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Creates a npz file for testing the NN\")\n    # parser.add_argument(\"-i\", \"--input\", type = str, help = \"Costmap in old .txt format\")\n    # parser.add_argument(\"-d\", \"--display\", type = str,\n    #                     help = \"Name of image to save costmap as\",\n    #                     default=\"display_image.jpg\")\n    parser.add_argument(\"-o\", \"--output\", type = str,\n                        help = \"File to save map to\",\n                        default = \"../../../resource/autorally/test/\")\n    args = parser.parse_args()\n    genLoadNetworkDataTest(args)\n    genComputationNetworkTest(args)\n"
  },
  {
    "path": "scripts/colored_noise.py",
    "content": "\"\"\"Generate colored noise.\"\"\"\n\"\"\"Source: https://github.com/felixpatzelt/colorednoise/blob/master/colorednoise.py\"\"\"\nfrom numpy import sqrt, newaxis\nfrom numpy.fft import irfft, rfftfreq\nfrom numpy.random import normal\nfrom numpy import sum as npsum\n\n\ndef powerlaw_psd_gaussian(exponent, size, fmin=0):\n    \"\"\"Gaussian (1/f)**beta noise.\n\n    Based on the algorithm in:\n    Timmer, J. and Koenig, M.:\n    On generating power law noise.\n    Astron. Astrophys. 300, 707-710 (1995)\n\n    Normalised to unit variance\n\n    Parameters:\n    -----------\n\n    exponent : float\n        The power-spectrum of the generated noise is proportional to\n\n        S(f) = (1 / f)**beta\n        flicker / pink noise:   exponent beta = 1\n        brown noise:            exponent beta = 2\n\n        Furthermore, the autocorrelation decays proportional to lag**-gamma\n        with gamma = 1 - beta for 0 < beta < 1.\n        There may be finite-size issues for beta close to one.\n\n    size : int or iterable\n        The output has the given shape, and the desired power spectrum in\n        the last coordinate. That is, the last dimension is taken as time,\n        and all other components are independent.\n\n    fmin : float, optional\n        Low-frequency cutoff.\n        Default: 0 corresponds to original paper. 
It is not actually\n        zero, but 1/samples.\n\n    Returns\n    -------\n    out : array\n        The samples.\n\n\n    Examples:\n    ---------\n\n    # generate 1/f noise == pink noise == flicker noise\n    >>> import colorednoise as cn\n    >>> y = cn.powerlaw_psd_gaussian(1, 5)\n    \"\"\"\n\n    # Make sure size is a list so we can iterate it and assign to it.\n    try:\n        size = list(size)\n    except TypeError:\n        size = [size]\n\n    # The number of samples in each time series\n    samples = size[-1]\n\n    # Calculate Frequencies (we assume a sample rate of one)\n    # Use fft functions for real output (-> hermitian spectrum)\n    f = rfftfreq(samples)\n\n    # Build scaling factors for all frequencies\n    s_scale = f\n    fmin = max(fmin, 1./samples) # Low frequency cutoff\n    ix   = npsum(s_scale < fmin)   # Index of the cutoff\n    if ix and ix < len(s_scale):\n        s_scale[:ix] = s_scale[ix]\n    s_scale = s_scale**(-exponent/2.)\n\n    # Calculate theoretical output standard deviation from scaling\n    w      = s_scale[1:].copy()\n    w[-1] *= (1 + (samples % 2)) / 2. 
# correct f = +-0.5\n    sigma = 2 * sqrt(npsum(w**2)) / samples\n    # Adjust size to generate one Fourier component per frequency\n    size[-1] = len(f)\n\n    # Add empty dimension(s) to broadcast s_scale along last\n    # dimension of generated random power + phase (below)\n    dims_to_add = len(size) - 1\n    s_scale     = s_scale[(newaxis,) * dims_to_add + (Ellipsis,)]\n    # Generate scaled random power + phase\n    sr = normal(scale=s_scale, size=size)\n    si = normal(scale=s_scale, size=size)\n    # If the signal length is even, frequencies +/- 0.5 are equal\n    # so the coefficient must be real.\n    if not (samples % 2): si[...,-1] = 0\n\n    # Regardless of signal length, the DC component must be real\n    si[...,0] = 0\n\n    # Combine power + corrected phase to Fourier components\n    s  = sr + 1J * si\n\n    # Transform to real time series & scale to unit variance\n    y = irfft(s, n=samples, axis=-1) / sigma\n\n    return y\n\nif __name__ == \"__main__\":\n    y = powerlaw_psd_gaussian(1, 20, 0.07)\n    print(\"final samples:\\n{}\".format(y))\n"
  },
  {
    "path": "scripts/double_integrator/generate_free_energy_video.py",
    "content": "import matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport matplotlib.animation as animation\nimport numpy as np\nfrom matplotlib import rc\nimport argparse\n\ncontroller_dict = {'v': 'vanilla_large_',\n                   't': 'tube_',\n                   'rs': 'robust_sc_',\n                   'rr': 'robust_rc_',\n                   'rhv': 'test_mppi_',\n                   'rht': 'test_tmppi_',\n                   'rhr': 'test_rmppi_',\n                   'cc':  'robust_cc_',\n                   'vr': 'vanilla_large_robust_',\n                   'tr': 'tube_robust_'}\n\ntitle_dict = {'v': 'MPPI Standard Cost',\n                   't': 'Tube-MPPI Standard Cost',\n                   'rs': 'RMPPI LQR Standard Cost',\n                   'rr': 'RMPPI LQR Robust Cost',\n                   'rhv': 'MPPI Autorally',\n                   'rht': 'Tube-MPPI Autorally',\n                   'rhr': 'RMPPI Autorally',\n                   'cc': 'RMPPI CCM Robust Cost',\n                   'vr': 'MPPI Robust Cost',\n                   'tr': 'Tube-MPPI Robust Cost'}\n\nrc('font', **{'size': 30})\n\n\nfig, ax = plt.subplots()\nfig.set_dpi(100)\nfig.set_size_inches(10, 10)\nxdata, ydata, yndata = [], [], []\n\nax.set_ylabel('Log(Free Energy)',labelpad=-30, position=(0.5,.6))\nax.set_xlabel('Time (sec)')\nax.set_yscale('log')\n\n#ax.xaxis.set_ticklabels([])\ntitle = None\nln3, = ax.plot([], [], 'r', alpha=0.7, linewidth=3, label='Nominal')\nln2, = ax.plot([], [], 'b', alpha=0.7, linewidth=3, label='Real')\nln1, = ax.plot(np.linspace(0,1000,100), 1*np.ones(100), 'k', alpha=0.7, linewidth=3, label='Crash')\n\n\ndef init():\n    ax.set_xlim(0, time_window)\n    ax.set_ylim(ymin, ymax)\n    return ln1, ln2, ln3\n\n\ndef update(frame):\n    xdata.append(time[frame])\n    ydata.append(real_fe[frame])\n    yndata.append(nominal_fe[frame])\n    ln2.set_data(xdata, ydata)\n    ln3.set_data(xdata, yndata)\n    ax.legend(loc=2)\n    if (xdata[-1] - 
xdata[0]) > time_window:\n        xdata.pop(0)\n        ydata.pop(0)\n        yndata.pop(0)\n        ln1.set_data(xdata, 1)\n        ax.set_xlim(xdata[0], xdata[-1])\n    else:\n        ax.set_xlim(xdata[0], xdata[0]+time_window)\n\n    return ln1, ln2, ln3\n\n\ndef index_data(time, nominal_fe, real_fe, begin_time, end_time):\n    ind = tuple([begin_time < time])\n    time = time[ind]\n    real_fe = real_fe[ind]\n    nominal_fe = nominal_fe[ind]\n    ind = tuple([time < end_time])\n    time = time[ind]\n    real_fe = real_fe[ind]\n    nominal_fe = nominal_fe[ind]\n    return time, nominal_fe, real_fe\n\n\ndef main(args):\n    build_dir = args['build_dir']\n    data_dir = build_dir + 'examples/'\n    controller_name = controller_dict[args['controller']]\n    fps = int(args['fps'])\n\n    # Let us load the data first\n    global real_fe, nominal_fe, time_window, time, ymin, ymax\n\n    time_window = int(args['time_window'])\n\n    # Load the free energy data\n    if args['controller'] == 'v':\n        time = np.linspace(0.02, 0.02*5000, 5000)\n        real_fe = np.load(data_dir + controller_name + 'real_free_energy.npy')/1000\n        nominal_fe = -1*np.ones_like(real_fe)\n\n    elif args['controller'] == 'rhv':\n        data = np.load(data_dir + controller_name + 'real_free_energy.npz')\n        time = data['arr_0']\n        real_fe = data['arr_1']/10000\n        nominal_fe = -1*np.ones_like(real_fe)\n        data.close()\n        time, nominal_fe, real_fe = index_data(time, nominal_fe, real_fe, 150, 190)\n\n    elif args['controller'] == 'rht':\n        data = np.load(data_dir + controller_name + 'real_free_energy.npz')\n        time = data['arr_0']\n        real_fe = data['arr_1']/10000\n        data.close()\n        data = np.load(data_dir + controller_name + 'nominal_free_energy.npz')\n        nominal_fe = data['arr_1']/10000\n        data.close()\n        time, nominal_fe, real_fe = index_data(time, nominal_fe, real_fe, 275, 315)\n\n    elif args['controller'] 
== 'rhr':\n        data = np.load(data_dir + controller_name + 'real_free_energy.npz')\n        time = data['arr_0']\n        real_fe = data['arr_1']/125000\n        data.close()\n        data = np.load(data_dir + controller_name + 'nominal_free_energy.npz')\n        nominal_fe = data['arr_1']/125000\n        data.close()\n        time, nominal_fe, real_fe = index_data(time, nominal_fe, real_fe, 180, 220)\n    elif args['controller'] == 'cc':\n        time = np.linspace(0.02, 0.02*3000, 3000)\n        real_fe = np.load(data_dir + controller_name + 'real_free_energy.npy')/100\n        nominal_fe = np.load(data_dir + controller_name + 'nominal_free_energy.npy')/100\n    elif args['controller'] == 'vr':\n        time = np.linspace(0.02, 0.02*5000, 5000)\n        real_fe = np.load(data_dir + controller_name + 'real_free_energy.npy')/100\n        nominal_fe = -1*np.ones_like(real_fe)\n    elif args['controller'] == 'tr':\n        time = np.linspace(0.02, 0.02*5000, 5000)\n        real_fe = np.load(data_dir + controller_name + 'real_free_energy.npy')/100\n        nominal_fe = np.load(data_dir + controller_name + 'nominal_free_energy.npy')/100\n    elif not args['controller'] == 'rr':\n        time = np.linspace(0.02, 0.02*5000, 5000)\n        real_fe = np.load(data_dir + controller_name + 'real_free_energy.npy')/1000\n        nominal_fe = np.load(data_dir + controller_name + 'nominal_free_energy.npy')/1000\n    else:\n        time = np.linspace(0.02, 0.02*5000, 5000)\n        real_fe = np.load(data_dir + controller_name + 'real_free_energy.npy')/100\n        nominal_fe = np.load(data_dir + controller_name + 'nominal_free_energy.npy')/100\n    ax.set_title(title_dict[args['controller']])\n\n    if np.mean(nominal_fe) == -1:\n        ymin = np.amin(real_fe)\n    else:\n        ymin = min(np.amin(real_fe), np.amin(nominal_fe))\n    ymax = max(1.1,max(np.amax(real_fe), np.amax(nominal_fe)))\n\n    # Set up formatting for the movie files\n    Writer = 
animation.writers['ffmpeg']\n    writer = Writer(fps=fps, bitrate=-1)\n\n    ani = FuncAnimation(fig, update, frames=np.arange(0,time.shape[0],1),\n                    init_func=init, blit=False, interval=1000/fps, repeat=False)\n    if args['save_mp4']:\n        ani.save(title_dict[args['controller']] + '_free_energy' + '.mp4', writer=writer)\n    else:\n        print('Not saving mp4 file, pass --save_mp4=True if you want to save it')\n    plt.show()\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description = 'Say hello')\n    parser.add_argument('--build_dir', help='Location of MPPI-Generic build folder', required=True)\n    parser.add_argument('--controller', help=\"Which controller we are plotting\", required=True)\n    parser.add_argument('--time_window', help='Time window size (s)', required=False, default=5)\n    parser.add_argument('--fps', required=False, default=50)\n    parser.add_argument('--save_mp4', required=False, default=False)\n    args = vars(parser.parse_args())\n\n    main(args)\n"
  },
  {
    "path": "scripts/double_integrator/generate_trajectory_video.py",
    "content": "import matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport matplotlib.animation as animation\nimport numpy as np\nfrom matplotlib import rc\nimport argparse\n\ncontroller_dict = {'v': 'vanilla_large_',\n                   'vr': 'vanilla_large_robust_',\n                   't': 'tube_',\n                   'tr': 'tube_robust_',\n                   'rs': 'robust_sc_',\n                   'rr': 'robust_rc_',\n                   'cc':  'robust_cc_',\n                   'vr': 'vanilla_large_robust_',\n                   'tr': 'tube_robust_'}\n\ntitle_dict = {'v': 'MPPI Standard Cost',\n              't': 'Tube-MPPI Standard Cost',\n              'vr': 'MPPI Robust Cost',\n              'tr': 'Tube-MPPI Robust Cost',\n              'rs': 'RMPPI LQR Standard Cost',\n              'rr': 'RMPPI LQR Robust Cost',\n              'cc': 'RMPPI CCM Robust Cost'}\n\nrc('font', **{'size': 30})\n\n\ntrack_radius_outer = 2 + .125\ntrack_radius_inner = 2 - .125\n\n# Draw the bounds for the track\ntheta = np.linspace(0,2*np.pi, 1000)\nx_track_inner = track_radius_inner*np.cos(theta)\ny_track_inner = track_radius_inner*np.sin(theta)\nx_track_outer = track_radius_outer*np.cos(theta)\ny_track_outer = track_radius_outer*np.sin(theta)\n\nfig, ax = plt.subplots()\nfig.set_dpi(100)\nfig.set_size_inches(10, 10)\nxdata, ydata = [], []\nxndata, yndata = [], []\n\nax.plot(x_track_outer, y_track_outer, 'k', linewidth=2)\nax.plot(x_track_inner, y_track_inner, 'k', linewidth=2)\nax.axis('equal')\nln3, = ax.plot([],[], 'g', alpha=0.7, linewidth=2, label='Nominal Trajectory')\nln2, = ax.plot([], [], 'ro', alpha=0.5, label='Nominal State')\nln1, = ax.plot([], [], 'bo', alpha=0.5, label='Actual State')\nax.set_ylabel('Y Pos (m)')\nax.set_xlabel('X Pos (m)')\ntitle = ax.text(0.5,0.5, \"\", bbox={'facecolor':'w', 'alpha':0.5, 'pad':20},\n                transform=ax.transAxes, ha=\"center\", fontsize=30)\n\n\ndef init():\n    ax.set_xlim(-2.25, 2.25)\n    
ax.set_ylim(-2.25, 2.25)\n    return ln1, ln2, ln3\n\n\ndef update(frame):\n    xndata.append(nominal_state[frame, 0])\n    yndata.append(nominal_state[frame, 1])\n    xdata.append(actual_state[frame, 0])\n    ydata.append(actual_state[frame, 1])\n    xnt_data = nominal_trajectory[frame,:,0]\n    ynt_data = nominal_trajectory[frame,:,1]\n    if len(xdata) > 20:\n        xdata.pop(0)\n        ydata.pop(0)\n        xndata.pop(0)\n        yndata.pop(0)\n    ln3.set_data(xnt_data, ynt_data)\n    ln2.set_data(xndata, yndata)\n    ln1.set_data(xdata, ydata)\n    # fig.legend()\n    title.set_text(\"Time: {val:05.2f} (sec)\".format(val=frame*0.02+0.02))\n    return ln1, ln2, ln3, title\n\n\ndef main(args):\n    build_dir = args['build_dir']\n    data_dir = build_dir + 'examples/'\n    controller_name = controller_dict[args['controller']]\n    fps = int(args['fps'])\n\n\n    # Let us load the data first\n    global actual_state, nominal_trajectory, nominal_state, real_fe, nominal_fe\n    actual_state = np.load(data_dir + controller_name + 'state_trajectory.npy')\n    nominal_trajectory = np.load(data_dir + controller_name + 'nominal_trajectory.npy')\n    nominal_state = nominal_trajectory[:,0,:]\n    ax.set_title(title_dict[args['controller']])\n\n    # Set up formatting for the movie files\n    Writer = animation.writers['ffmpeg']\n    writer = Writer(fps=fps, bitrate=-1)\n    ani = FuncAnimation(fig, update, frames=np.arange(0,actual_state.shape[0],1),\n                    init_func=init, blit=True, interval=1000/fps, repeat=False)\n    if args['save_mp4']:\n        ani.save(title_dict[args['controller']] + '_trajectory' + '.mp4', writer=writer)\n    else:\n        print('Not saving mp4 file, pass --save_mp4 if you want to save it')\n    # plt.show()\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description = 'Say hello')\n    parser.add_argument('--build_dir', help='Location of MPPI-Generic build folder', required=True)\n    
parser.add_argument('--controller', help=\"Which controller we are plotting\", required=True)\n    parser.add_argument('--fps', required=False, default=50)\n    parser.add_argument('--save_mp4', required=False, default=False)\n    args = vars(parser.parse_args())\n\n    main(args)"
  },
  {
    "path": "scripts/double_integrator/plot_DI_test_trajectories.py",
    "content": "import numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\n\ntrack_radius_outer = 2 + .125\ntrack_radius_inner = 2 - .125\n\n# Draw the bounds for the track\ntheta = np.linspace(0,2*np.pi, 1000)\nx_track_inner = track_radius_inner*np.cos(theta)\ny_track_inner = track_radius_inner*np.sin(theta)\nx_track_outer = track_radius_outer*np.cos(theta)\ny_track_outer = track_radius_outer*np.sin(theta)\n\n\n\n\ndef plot_trajectories(axis, axis_name, trajectory_array):\n    axis.set_title(axis_name)\n    time_horizon, trajectory_length, state_dim = trajectory_array.shape\n    i = 0\n    speed_vec = np.zeros((time_horizon))\n    for i in range(time_horizon):\n        if (trajectory_array[i,0,:] == 0).all():  # Means that the system stopped saving trajectories because of task failure\n            break\n        axis.plot(trajectory_array[i,:,0], trajectory_array[i,:,1], 'b', linewidth=0.5, alpha=0.7)\n\n        # Compute the speed\n        speed_vec[i] = np.linalg.norm(trajectory_array[i,0,2:])\n        # Check for tube failure\n\n        axis.plot(trajectory_array[:i,0,0], trajectory_array[:i,0,1], 'y', linewidth=2, alpha=1.0)\n    average_speed = np.mean(speed_vec)\n    print(axis_name)\n    print(average_speed)\n    axis.plot(x_track_inner, y_track_inner, 'r', linewidth=2)\n    axis.plot(x_track_outer, y_track_outer, 'r', linewidth=2)\n\n\ndef plot_trajectories2(axis, axis_name, trajectory_array):\n    axis.set_title(axis_name)\n    time_horizon, state_dim = trajectory_array.shape\n    i = 0\n    speed_vec = np.zeros((time_horizon))\n    for i in range(time_horizon):\n\n\n        # Compute the speed\n        speed_vec[i] = np.linalg.norm(trajectory_array[i,2:])\n        # Check for tube failure\n\n        axis.plot(trajectory_array[:i,0], trajectory_array[:i,1], 'y', linewidth=2, alpha=1.0)\n    average_speed = np.mean(speed_vec)\n    print(axis_name)\n    print(average_speed)\n    axis.plot(x_track_inner, y_track_inner, 'r', linewidth=2)\n    
axis.plot(x_track_outer, y_track_outer, 'r', linewidth=2)\n\n\n\ndef main(args):\n    build_dir = args['build_dir']\n    data_dir = build_dir + 'tests/controllers/'\n    CCM_true = args['CCM']\n    # Create the figures for each subplot\n    fig, axs = plt.subplots(2,2)\n\n    for ax in axs.flat:\n        ax.set(xlabel='X Position (m)', ylabel='Y Position (m)')\n\n    # Hide x labels and tick labels for top plots and y ticks for right plots.\n    for ax in axs.flat:\n        ax.label_outer()\n\n    # # Plot the vanilla trajectory\n    # vanilla_nominal = np.load(data_dir + 'vanilla_nominal.npy')\n    # plot_trajectories(axs[0,0], 'Vanilla MPPI Nominal Disturbance', vanilla_nominal)\n    #\n    # # Vanilla MPPI with large noise\n    # vanilla_large = np.load(data_dir + 'vanilla_large_track_feedback.npy')\n    # plot_trajectories(axs[0,1], 'Vanilla MPPI Tracking Large Disturbance', vanilla_large)\n    #\n    # # Vanilla MPPI with large noise + a tracking controller\n    # # vanilla_large_track = np.load(data_dir + 'vanilla_large_track.npy')\n    # # plot_trajectories(axs[1,0], 'Vanilla MPPI Large Disturbance + Tracking', vanilla_large_track)\n    #\n    # # Tube MPPI with large noise\n    # tube_large = np.load(data_dir + 'tube_large_actual.npy')\n    # plot_trajectories(axs[1,0], 'Tube MPPI Large Disturbance', tube_large)\n    #\n    # # RMPPI with large noise\n    # # Create the figures for each subplot\n    # # fig2, axs2 = plt.subplots(2,2)\n    # #\n    # # for ax in axs2.flat:\n    # #     ax.set(xlabel='X Position (m)', ylabel='Y Position (m)')\n    # robust_large = np.load(data_dir + 'robust_large_actual.npy')\n    # plot_trajectories(axs[1,1], 'Robust LQR MPPI Actual', robust_large)\n\n    if (CCM_true):\n        fig, axs = plt.subplots(2,2)\n\n        for ax in axs.flat:\n            ax.set(xlabel='X Position (m)', ylabel='Y Position (m)')\n        # robust_ccm = np.load(data_dir + 'robust_large_actual_CCM_t_4950.npy')\n        # plot_trajectories(axs[0,0], 
'Robust CCM MPPI Actual', robust_ccm)\n        #\n        # robust_ccm = np.load(data_dir + 'robust_large_nominal_CCM_t_4950.npy')\n        # plot_trajectories(axs[0,1], 'Robust CCM MPPI Nominal', robust_ccm)\n\n        robust_ccm = np.load(data_dir + 'robust_large_actual_traj_CCM_t_2999.npy')\n        plot_trajectories2(axs[0,0], 'Robust CCM MPPI Actual', robust_ccm)\n\n        robust_ccm = np.load(data_dir + 'robust_large_nominal_traj_CCM_t_2999.npy')\n        plot_trajectories(axs[0,1], 'Robust CCM MPPI Nominal', robust_ccm)\n\n        robust_large = np.load(data_dir + 'robust_large_actual.npy')\n        plot_trajectories(axs[1,0], 'Robust LQR MPPI Actual', robust_large)\n\n        robust_large = np.load(data_dir + 'robust_large_nominal.npy')\n        plot_trajectories(axs[1,1], 'Robust LQR MPPI Nominal', robust_large)\n\n    # robust_large = np.load(data_dir + 'robust_ancillary.npy')\n    # plot_trajectories(axs2[1,0], 'Robust MPPI Ancillary', robust_large)\n\n\n# # Tube MPPI ancillary trajectories\n    # tube_ancillary = np.load(data_dir + 'tube_ancillary.npy')\n    # plot_trajectories(axs[1,1], 'Tube MPPI Ancillary Solution', tube_ancillary)\n\n    plt.show()\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description = 'Say hello')\n    parser.add_argument('--build_dir', help='Location of MPPI-Generic build folder', required=True)\n    parser.add_argument('--CCM', help=\"Plot the CCM stuff?\", required=False, default=0)\n    args = vars(parser.parse_args())\n\n    main(args)\n"
  },
  {
    "path": "scripts/double_integrator/plot_DI_test_trajectories_tube_only.py",
    "content": "import numpy as np\nimport matplotlib.pyplot as plt\nimport argparse\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_gtk3agg import FigureCanvas\nfrom matplotlib.backends.backend_gtk3 import (\n    NavigationToolbar2GTK3 as NavigationToolbar)\n\ntrack_radius_outer = 2 + .125\ntrack_radius_inner = 2 - .125\n\n# Draw the bounds for the track\ntheta = np.linspace(0,2*np.pi, 1000)\nx_track_inner = track_radius_inner*np.cos(theta)\ny_track_inner = track_radius_inner*np.sin(theta)\nx_track_outer = track_radius_outer*np.cos(theta)\ny_track_outer = track_radius_outer*np.sin(theta)\n\ncur_timestep = 0\nnominal = None\nactual = None\nfeedback = None\naxs = None\n\ndef callback(event):\n    global actual, ancillary, nominal, feedback\n    global axs\n\n    ''' this function gets called if we hit a key'''\n    plt.cla()\n    plot_boundaries(axs)\n    global cur_timestep\n    if event.key == \"right\":\n        # print((actual[cur_timestep,0,:] == 0).all())\n        if not (actual[cur_timestep,0,:] == 0).all():  # Means that the system stopped saving trajectories because of task failure\n            cur_timestep += 1\n    elif event.key == \"left\":\n        if cur_timestep > 0:\n            cur_timestep -= 1\n\n\n    # only plot the last n states\n    states = 100\n    min_index = max(cur_timestep - states, 0)\n\n    # plot actual trajectory\n    axs.plot(actual[cur_timestep,:,0], actual[cur_timestep,:,1], 'b', linewidth=2.0, label='actual trajectory')\n    axs.plot(actual[min_index:cur_timestep,0,0], actual[min_index:cur_timestep,0,1], 'bx', label='actual state')\n\n    # plot actual state with feedback\n    axs.plot(feedback[cur_timestep,:,0], feedback[cur_timestep,:,1], 'c', linewidth=2.0, label='actual feedback trajectory')\n\n    # plot ancillary trajectory\n    # axs.plot(ancillary[cur_timestep,:,0], ancillary[cur_timestep,:,1], 'k', linewidth=2.0, label='ancillary trajectory')\n\n    # plt nominal state\n    
axs.plot(nominal[cur_timestep,:,0], nominal[cur_timestep,:,1], 'g', linewidth=2.0, label='nominal trajectory')\n    axs.plot(nominal[min_index:cur_timestep,0,0], nominal[min_index:cur_timestep,0,1], 'g+', label='nominal state')\n\n    # fix legend\n    plt.legend(bbox_to_anchor=(.80, 1.15), loc='upper left', borderaxespad=0.)\n\ndef plot_trajectories(axis, axis_name, trajectory_array):\n    axis.set_title(axis_name)\n    time_horizon, trajectory_length, state_dim = trajectory_array.shape\n    i = 0\n    for i in range(time_horizon):\n        if (trajectory_array[i,0,:] == 0).all():  # Means that the system stopped saving trajectories because of task failure\n            break\n        axis.plot(trajectory_array[i,:,0], trajectory_array[i,:,1], 'b', linewidth=0.5)\n    axis.plot(trajectory_array[:i,0,0], trajectory_array[:i,0,1], 'y', linewidth=2)\n\ndef plot_boundaries(axis):\n    axis.plot(x_track_inner, y_track_inner, 'r', linewidth=2, label='boundary')\n    axis.plot(x_track_outer, y_track_outer, 'r', linewidth=2)\n\n\ndef main(args):\n    build_dir = args['build_dir']\n    controller_flag = args['controller']\n    data_dir = build_dir + 'tests/controllers/'\n    # Create the figures for each subplot\n    global axs\n    fig, axs = plt.subplots(1,1)\n\n    # set up callback for arrow keys\n    fig.canvas.mpl_connect('key_press_event', callback)\n\n\n    axs.set(xlabel='X Position (m)', ylabel='Y Position (m)')\n\n    # # Hide x labels and tick labels for top plots and y ticks for right plots.\n    # for ax in axs.flat:\n    #     ax.label_outer()\n\n\n    # Tube MPPI with large noise\n    global actual, ancillary, nominal, feedback\n    if controller_flag == '0':  # 0 for tube, 1 for robust\n        actual = np.load(data_dir + 'tube_large_actual.npy')\n        nominal = np.load(data_dir + 'tube_large_nominal.npy')\n        ancillary = np.load(data_dir + 'tube_ancillary.npy')\n        feedback = np.load(data_dir + 'tube_large_feedback.npy')\n    elif 
controller_flag == '1':\n        actual = np.load(data_dir + 'robust_large_actual.npy')\n        nominal = np.load(data_dir + 'robust_large_nominal.npy')\n        ancillary = np.load(data_dir + 'robust_ancillary.npy')\n        feedback = np.load(data_dir + 'robust_large_feedback.npy')\n    else:\n        pass\n    plot_boundaries(axs)\n    # TODO plot the initial location\n\n    plt.show()\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description = 'Say hello')\n    parser.add_argument('--build_dir', help='Location of MPPI-Generic build folder', required=True)\n    parser.add_argument('--controller', help='0: Tube MPPI, 1: Robust MPPI', required=True)\n    args = vars(parser.parse_args())\n\n    main(args)\n"
  },
  {
    "path": "scripts/double_integrator/plot_stuff.py",
    "content": "import numpy as np\nfrom matplotlib import rc\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 16})\nrc('text', usetex=True)\nimport matplotlib.pyplot as plt\nimport argparse\n\ntrack_radius_outer = 2 + .125\ntrack_radius_inner = 2 - .125\n\n# Draw the bounds for the track\ntheta = np.linspace(0,2*np.pi, 1000)\nx_track_inner = track_radius_inner*np.cos(theta)\ny_track_inner = track_radius_inner*np.sin(theta)\nx_track_outer = track_radius_outer*np.cos(theta)\ny_track_outer = track_radius_outer*np.sin(theta)\n\ndef plot_fe_vs_time(fe_array, crash_cost=1.0, title=\"\", savefig=0):\n\tfig = plt.figure()\n\taxis = plt.gca()\n\t# axis.set_title(title)\n\ttime = np.linspace(0,fe_array.shape[0]*0.02, fe_array.shape[0])\n\tplt.plot(time, np.ones_like(time), 'r', alpha=0.7, label='Crash Threshold')\n\tplt.plot(time, fe_array/crash_cost, 'b', alpha=0.7, label='Free Energy')\n\tplt.legend()\n\tif savefig:\n\t\tfig.savefig(title+'.pdf', bbox_inches='tight')\n\ndef plot_fe_bounded(fe_array, fe_bound, title=\"\", savefig=0):\n\tfig = plt.figure()\n\taxis = plt.gca()\n\t# axis.set_title(title)\n\t# plt.yscale('log')\n\ttime = np.linspace(0,fe_array.shape[0]*0.02, fe_array.shape[0])\n\tplt.plot(time, fe_array, 'b', alpha=0.7, label='FE Increase')\n\tplt.plot(time, fe_bound, 'r', alpha=0.7, label='Bound')\n\tplt.legend(bbox_to_anchor=(1.05, 1),loc='upper left')\n\tif savefig:\n\t\tfig.savefig(title+'.pdf', bbox_inches='tight')\n\ndef plot_fe_vs_space(fe_array, state_array):\n\tpass\n\ndef plot_trajectory(trajectory, title=\"\", savefig=0):\n\tfig = plt.figure()\n\taxis = plt.gca()\n\t# axis.set_title(title)\n\taxis.set_aspect('equal', 'box')\n\taxis.set(xlim=(-2.5, 2.5), ylim=(-2.5, 2.5))\n\taxis.plot(trajectory[:,0], trajectory[:,1], 'limegreen', alpha=0.95, linewidth=1.5, label='Actual Trajectory')\n\taxis.plot(x_track_inner, y_track_inner, 'k', linewidth=2)\n\taxis.plot(x_track_outer, y_track_outer, 'k', linewidth=2)\n\tif 
savefig:\n\t\tfig.savefig(title+'_state_trajectory.pdf', bbox_inches='tight')\n\n\ndef plot_nominal_trajectory(trajectory, nominal_trajectory, title=\"\", savefig=0):\n\tfig = plt.figure()\n\taxis = plt.gca()\n\t# axis.set_title(title)\n\taxis.set_aspect('equal', 'box')\n\taxis.set(xlim=(-2.3, 2.3), ylim=(-2.3, 2.3))\n\n\ttime_horizon, trajectory_length, state_dim = nominal_trajectory.shape\n\n\tfor i in range(time_horizon-1):\n\t\taxis.plot(nominal_trajectory[i,:,0], nominal_trajectory[i,:,1], 'b', linewidth=1.0, alpha=0.7)\n\n\taxis.plot(nominal_trajectory[time_horizon-1,:,0], nominal_trajectory[time_horizon-1,:,1], 'b', linewidth=1.0, alpha=0.7, label='Nom')\n\taxis.plot(trajectory[:,0], trajectory[:,1], 'limegreen', alpha=0.95, linewidth=1.5, label='Act')\n\taxis.plot(x_track_inner, y_track_inner, 'k', linewidth=2)\n\taxis.plot(x_track_outer, y_track_outer, 'k', linewidth=2)\n\t# plt.legend(bbox_to_anchor=(1.05, 1),loc='upper left')\n\tif savefig:\n\t\tfig.savefig(title+'_nominal_trajectory.pdf', bbox_inches='tight')\n\n\n\n\ndef plot_nominal_state_used_vs_time(ns_array, title=\"\"):\n\tplt.figure()\n\taxis = plt.gca()\n\taxis.set_title(title)\n\ttime = np.linspace(0,ns_array.shape[0]*0.02, ns_array.shape[0])\n\tplt.scatter(time, ns_array)\n\n\ndef main(args):\n\tbuild_dir = args['build_dir']\n\tdata_dir = build_dir + 'examples/'\n\tshow_plot = args['show_fig']\n\tsave_fig = args['save_fig']\n\n\tvanilla_fe = np.load(data_dir + \"vanilla_free_energy.npy\")\n\tvanilla_state_trajectory = np.load(data_dir + \"vanilla_state_trajectory.npy\")\n\tvanilla_robust_state_trajectory = np.load(data_dir + \"vanilla_large_robust_state_trajectory.npy\")\n\tvanilla_robust_fe = np.load(data_dir + \"vanilla_large_robust_free_energy.npy\")\n\n\t# vanilla_large_fe = np.load(data_dir + \"vanilla_large_free_energy.npy\")\n\tvanilla_large_state_trajectory = np.load(data_dir + \"vanilla_large_state_trajectory.npy\")\n\tvanilla_sc_nominal_trajectory = np.load(data_dir + 
\"vanilla_nominal_trajectory.npy\")\n\n\n\t# tube_large_rfe = np.load(data_dir + \"tube_real_free_energy.npy\")\n\t# tube_large_nfe = np.load(data_dir + \"tube_nominal_free_energy.npy\")\n\ttube_large_state_trajectory = np.load(data_dir + \"tube_state_trajectory.npy\")\n\ttube_large_nominal_state_used = np.load(data_dir + \"tube_nominal_state_used.npy\")\n\ttube_sc_nominal_trajectory = np.load(data_dir + \"tube_nominal_trajectory.npy\")\n\n\ttube_robust_state_trajectory = np.load(data_dir + \"tube_robust_state_trajectory.npy\")\n\ttube_robust_nominal_trajectory = np.load(data_dir + \"tube_robust_nominal_trajectory.npy\")\n\ttube_robust_rfe = np.load(data_dir + \"tube_robust_real_free_energy.npy\")\n\n\n\trobust_sc_rfe = np.load(data_dir + \"robust_sc_real_free_energy.npy\")\n\t# robust_sc_nfe = np.load(data_dir + \"robust_sc_nominal_free_energy.npy\")\n\trobust_sc_state_trajectory = np.load(data_dir + \"robust_sc_state_trajectory.npy\")\n\trobust_sc_nominal_state_used = np.load(data_dir + \"robust_sc_nominal_state_used.npy\")\n\trobust_sc_nominal_trajectory = np.load(data_dir + \"robust_sc_nominal_trajectory.npy\")\n\n\n\trobust_rc_rfe = np.load(data_dir + \"robust_rc_real_free_energy.npy\")\n\t# robust_rc_nfe = np.load(data_dir + \"robust_rc_nominal_free_energy.npy\")\n\trobust_rc_state_trajectory = np.load(data_dir + \"robust_rc_state_trajectory.npy\")\n\trobust_rc_nominal_state_used = np.load(data_dir + \"robust_rc_nominal_state_used.npy\")\n\trobust_rc_rfe_bound = np.load(data_dir + \"robust_rc_real_free_energy_bound.npy\")\n\trobust_rc_nfe_bound = np.load(data_dir + \"robust_rc_nominal_free_energy_bound.npy\")\n\trobust_rc_rfe_growth_bound = np.load(data_dir + \"robust_rc_real_free_energy_growth_bound.npy\")[1:]\n\t# robust_rc_rfc_growth = np.load(data_dir + \"robust_rc_real_free_energy_growth.npy\")\n\trobust_rc_rfc_growth = robust_rc_rfe[1:] - robust_rc_rfe[:-1]\n\trobust_rc_nfc_growth = np.load(data_dir + 
\"robust_rc_nominal_free_energy_growth.npy\")\n\trobust_rc_nominal_trajectory = np.load(data_dir + \"robust_rc_nominal_trajectory.npy\")\n\n\n\trobust_ccm_rfe = np.load(data_dir + \"robust_large_actual_free_energy_CCM_t_2999.npy\")\n\trobust_ccm_nfe = np.load(data_dir + \"robust_large_nominal_free_energy_CCM_t_2999.npy\")\n\trobust_ccm_state_trajectory = np.load(data_dir + \"robust_large_actual_traj_CCM_t_2999.npy\")\n\trobust_ccm_nominal_state_used = np.load(data_dir + \"robust_large_nominal_state_used_CCM_t_2999.npy\")\n\trobust_ccm_rfe_bound = np.load(data_dir + \"robust_large_actual_free_energy_bound_CCM_t_2999.npy\")\n\trobust_ccm_nfe_bound = np.load(data_dir + \"robust_large_nominal_free_energy_bound_CCM_t_2999.npy\")\n\trobust_ccm_rfe_growth_bound = np.load(data_dir + \"robust_large_actual_free_energy_growth_bound_CCM_t_2999.npy\")\n\t# robust_ccm_rfc_growth = np.load(data_dir + \"robust_large_actual_free_energy_growth_CCM_t_2999.npy\")\n\trobust_ccm_rfc_growth = robust_ccm_rfe[1:] - robust_ccm_rfe[:-1]\n\trobust_ccm_nfc_growth = np.load(data_dir + \"robust_large_nominal_free_energy_growth_CCM_t_2999.npy\")\n\trobust_ccm_nominal_trajectory = np.load(data_dir + \"robust_large_nominal_traj_CCM_t_2999.npy\")\n\n\n\n\t# plot_fe_vs_time(vanilla_fe, 1000,\"Vanilla Real Free Energy\")\n\t# plot_trajectory(vanilla_state_trajectory, \"Vanilla State Trajectory\", save_fig)\n\n\t# plot_fe_vs_time(vanilla_large_fe, 1000,\"Vanilla Large Real Free Energy\")\n\t# plot_trajectory(vanilla_large_state_trajectory, \"Vanilla Large State Trajectory\", save_fig)\n\n\t# plot_fe_vs_time(vanilla_robust_fe, 100, \"Vanilla Robust Real Free Energy\", save_fig)\n\t# plot_trajectory(vanilla_robust_state_trajectory, \"Vanilla Robust State Trajectory\", save_fig)\n\t#\n\t# plot_fe_vs_time(tube_large_rfe, 1000,\"Tube Real Free Energy\")\n\t# plot_fe_vs_time(tube_large_nfe, 1000,\"Tube Nominal Free Energy\")\n\t# plot_fe_vs_time(tube_robust_rfe, 100, \"Tube Robust Real Free Energy\", 
save_fig)\n\t# plot_trajectory(tube_large_state_trajectory, \"Tube State Trajectory\", save_fig)\n\t# plot_nominal_trajectory(tube_large_state_trajectory, tube_sc_nominal_trajectory, \"Tube Standard State Trajectory\", save_fig)\n\t# plot_nominal_trajectory(tube_robust_state_trajectory, tube_robust_nominal_trajectory, \"Tube Robust State Trajectory\", save_fig)\n\n# # plot_nominal_state_used_vs_time(tube_large_nominal_state_used)\n\t#\n\t# plot_fe_vs_time(robust_sc_rfe, 1000, \"Robust Standard Real Free Energy\", save_fig)\n\t# plot_fe_vs_time(robust_sc_nfe, 1000, \"Robust Standard Nominal Free Energy\")\n\t# plot_trajectory(robust_sc_state_trajectory, \"Robust Standard State Trajectory\")\n\t# # plot_nominal_state_used_vs_time(robust_sc_nominal_state_used)\n\t# plot_nominal_trajectory(robust_sc_state_trajectory, robust_sc_nominal_trajectory, \"Robust Standard State Trajectory\", save_fig)\n\n\t# plot_fe_vs_time(robust_rc_rfe, 100, \"Robust Robust Real Free Energy\", save_fig)\n\tplot_fe_bounded(robust_rc_rfc_growth, robust_rc_rfe_growth_bound, \"Robust Robust Real Free Energy Growth\")\n\t# plot_fe_bounded(robust_rc_nfe, robust_rc_nfe_bound, \"Robust Robust Nominal Free Energy\")\n\t# plot_fe_vs_time(robust_rc_nfc_growth, 0,\"Robust Robust Nominal Free Energy Growth\")\n\t# plot_trajectory(robust_rc_state_trajectory, \"Robust Robust State Trajectory\")\n\t# plot_nominal_trajectory(robust_rc_state_trajectory, robust_rc_nominal_trajectory, \"Robust Robust State Trajectory\", save_fig)\n\n\t# plot_nominal_state_used_vs_time(robust_rc_nominal_state_used)\n\n\t# Correct bound\n\tincorrect_bound = (robust_ccm_rfe_bound - robust_ccm_nfe)\n\tcorrect_bound = 8*incorrect_bound\n\t# plot_fe_vs_time(robust_ccm_rfe, 100, \"Robust CCM Real Free Energy\", save_fig)\n\tplot_fe_bounded(robust_ccm_rfc_growth, (robust_ccm_rfe_growth_bound-incorrect_bound+correct_bound)[1:], \"Robust CCM Real Free Energy Growth\")\n\t# plot_fe_bounded(robust_ccm_nfe, robust_ccm_nfe_bound, \"Robust 
CCM Nominal Free Energy\")\n\t# plot_fe_vs_time(robust_ccm_nfc_growth, 0,\"Robust CCM Nominal Free Energy Growth\")\n\t# plot_trajectory(robust_ccm_state_trajectory, \"Robust CCM State Trajectory\")\n\t# plot_nominal_state_used_vs_time(robust_ccm_nominal_state_used)\n\t# plot_nominal_trajectory(robust_ccm_state_trajectory, robust_ccm_nominal_trajectory, \"Robust CCM State Trajectory\", save_fig)\n\n\tif show_plot:\n\t\tplt.show()\n\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description = 'Say hello')\n\tparser.add_argument('--build_dir', help='Location of MPPI-Generic build folder', required=True)\n\tparser.add_argument('--show_fig', default=False, help='Show the figure upon completion of plotting.', required=False)\n\tparser.add_argument('--save_fig', default=False, help='Save the figures after they are generated.', required=False)\n\n\targs = vars(parser.parse_args())\n\n\tmain(args)\n"
  },
  {
    "path": "scripts/run_autoformatter.sh",
    "content": "#! /usr/bin/env bash\nif [ \"$1\" == \"\" ]; then\n  dir=\"./\"\nelse\n  dir=\"$1\"\nfi\n\n# Format cuda header files\necho \"Formatting cuh files...\"\nfind $dir -name \"*.cuh\" ! -path \"*build*\" ! -path \"*submodules*\" | xargs -I % bash -c \"clang-format --style=file -i --assume-filename=cu %\"\n\n# Format cuda source files\necho \"Formatting cu files...\"\nfind $dir -name \"*.cu\" ! -path \"*build*\" ! -path \"*submodules*\" | xargs -I % bash -c \"clang-format --style=file -i %\"\n\n# Format cpp source files\necho \"Formatting cpp files...\"\nfind $dir -name \"*.cpp\" ! -path \"*build*\" ! -path \"*submodules*\" | xargs -I % bash -c \"clang-format --style=file -i %\"\n\n# Format h source files\necho \"Formatting h files...\"\nfind $dir -name \"*.h\" ! -path \"*build*\" ! -path \"*submodules*\" | xargs -I % bash -c \"clang-format --style=file -i %\"\n\n# Format hpp source files\necho \"Formatting hpp files...\"\nfind $dir -name \"*.hpp\" ! -path \"*build*\" ! -path \"*submodules*\" | xargs -I % bash -c \"clang-format --style=file -i %\"\n"
  },
  {
    "path": "src/CMakeLists.txt",
    "content": "add_subdirectory(controllers)"
  },
  {
    "path": "src/controllers/CMakeLists.txt",
    "content": "# Needed for methods to be built into libraries in Release mode\nset(CMAKE_CUDA_FLAGS \"${CMAKE_CUDA_FLAGS} -Xcompiler=-fno-guess-branch-probability\")\n\nadd_subdirectory(cartpole)\nadd_subdirectory(autorally)\nadd_subdirectory(double_integrator)\nadd_subdirectory(quadrotor)"
  },
  {
    "path": "src/controllers/autorally/CMakeLists.txt",
    "content": "set(LIBRARY_NAME autorally_mppi)\nSET (LIB_MAJOR 0)\nSET (LIB_MINOR 0)\nSET (LIB_RELEASE 1)\n\nadd_library(${LIBRARY_NAME} SHARED\n        autorally_mppi.cu\n        )\n\ntarget_link_libraries( ${LIBRARY_NAME}\n        ${YAML_CPP_LIBRARIES}\n        ${MPPI_HEADER_LIBRARY_NAME}\n        )\n\nSET (_soversion ${LIB_MAJOR}.${LIB_MINOR}.${LIB_RELEASE})\n\ninstall(TARGETS ${LIBRARY_NAME}\n        # IMPORTANT: Add the library to the \"export-set\"\n        EXPORT ${PROJECT_NAME}-targets\n        RUNTIME DESTINATION bin\n        LIBRARY DESTINATION lib/${CMAKE_PROJECT_NAME}\n        )\n"
  },
  {
    "path": "src/controllers/autorally/autorally_mppi.cu",
    "content": "#include <mppi/instantiations/autorally_mppi/autorally_mppi.cuh>\n\n/*\n * This file contains the instantiations of the controller for the AutoRally platform.\n * Will have a dynamics model of AutoRally, some cost function,\n * and a controller of just MPPI, (not tube or R)\n */\n\n// Convenience typedef for the MPPI Controller.\ntemplate class VanillaMPPIController<DynamicsModel, CostFunctionClass, FEEDBACK_T, NUM_TIMESTEPS, MPPI_NUM_ROLLOUTS__,\n                                     Sampler>;\n\n// template class VanillaMPPIController<CartpoleDynamics, CartpoleQuadraticCost, 100, 150, 64, 8>;\n"
  },
  {
    "path": "src/controllers/cartpole/CMakeLists.txt",
    "content": "set(LIBRARY_NAME cartpole_mppi)\nSET (LIB_MAJOR 0)\nSET (LIB_MINOR 0)\nSET (LIB_RELEASE 1)\n\nadd_library(${LIBRARY_NAME} SHARED\n        cartpole_mppi.cu\n        )\n\ntarget_link_libraries( ${LIBRARY_NAME}\n        ${YAML_CPP_LIBRARIES}\n        ${MPPI_HEADER_LIBRARY_NAME}\n        )\n\nSET (_soversion ${LIB_MAJOR}.${LIB_MINOR}.${LIB_RELEASE})\n\ninstall(TARGETS ${LIBRARY_NAME}\n        # IMPORTANT: Add the library to the \"export-set\"\n        EXPORT ${PROJECT_NAME}-targets\n        RUNTIME DESTINATION bin\n        LIBRARY DESTINATION lib/${CMAKE_PROJECT_NAME}\n        )\n"
  },
  {
    "path": "src/controllers/cartpole/cartpole_mppi.cu",
    "content": "#include <mppi/instantiations/cartpole_mppi/cartpole_mppi.cuh>\n\n/*\n * This file contains the instantiations of the controller for the cart pole.\n * Will have a dynamics model of cartpole, some cost function,\n * and a controller of just MPPI, (not tube or R)\n */\n// Num_timesteps, num_rollouts, blockdim x, blockdim y\nconst int NUMBER_TIMESTEPS = 100;\n// template class GPUFeedbackController<DeviceDDPImpl<DeviceDDP<CartpoleDynamics>, CartpoleDynamics>, CartpoleDynamics>;\n// template class DeviceDDPImpl<DeviceDDP<CartpoleDynamics>, CartpoleDynamics>;\n// template class DeviceDDP<CartpoleDynamics>;\ntemplate class DDPFeedback<CartpoleDynamics, 150>;\n\ntemplate class DDPFeedback<CartpoleDynamics, NUMBER_TIMESTEPS>;\n\n#define SAMPLER_T mppi::sampling_distributions::GaussianDistribution<CartpoleDynamics::DYN_PARAMS_T>\n\ntemplate class SAMPLER_T;\n\ntemplate class mppi::sampling_distributions::GaussianDistributionImpl<\n    SAMPLER_T, mppi::sampling_distributions::GaussianParams, CartpoleDynamics::DYN_PARAMS_T>;\n\n// template __host__ void mppi::sampling_distributions::SamplingDistribution<SAMPLER_T,\n//          mppi::sampling_distributions::GaussianParams,\n//          CartpoleDynamics::DYN_PARAMS_T>::freeCudaMem();\ntemplate class mppi::sampling_distributions::SamplingDistribution<\n    SAMPLER_T, mppi::sampling_distributions::GaussianParams, CartpoleDynamics::DYN_PARAMS_T>;\n\ntemplate class Controller<CartpoleDynamics, CartpoleQuadraticCost, DDPFeedback<CartpoleDynamics, NUMBER_TIMESTEPS>,\n                          SAMPLER_T, NUMBER_TIMESTEPS, 2048>;\ntemplate class Controller<CartpoleDynamics, CartpoleQuadraticCost, DDPFeedback<CartpoleDynamics, NUMBER_TIMESTEPS>,\n                          SAMPLER_T, NUMBER_TIMESTEPS, 256>;\ntemplate class Controller<CartpoleDynamics, CartpoleQuadraticCost, DDPFeedback<CartpoleDynamics, 150>, SAMPLER_T, 150,\n                          512>;\n\ntemplate class VanillaMPPIController<CartpoleDynamics, 
CartpoleQuadraticCost,\n                                     DDPFeedback<CartpoleDynamics, NUMBER_TIMESTEPS>, NUMBER_TIMESTEPS, 2048>;\ntemplate class VanillaMPPIController<CartpoleDynamics, CartpoleQuadraticCost,\n                                     DDPFeedback<CartpoleDynamics, NUMBER_TIMESTEPS>, NUMBER_TIMESTEPS, 256>;\ntemplate class VanillaMPPIController<CartpoleDynamics, CartpoleQuadraticCost, DDPFeedback<CartpoleDynamics, 150>, 150,\n                                     512>;\n\n#undef SAMPLER_T\n// template class TubeMPPIController<CartpoleDynamics, CartpoleQuadraticCost,\n//                                   DDPFeedback<CartpoleDynamics, NUMBER_TIMESTEPS>, NUMBER_TIMESTEPS, 2048>;\n"
  },
  {
    "path": "src/controllers/double_integrator/CMakeLists.txt",
    "content": "set(LIBRARY_NAME double_integrator_mppi)\nSET (LIB_MAJOR 0)\nSET (LIB_MINOR 0)\nSET (LIB_RELEASE 1)\n\nadd_library(${LIBRARY_NAME} SHARED\n        double_integrator_mppi.cu\n        )\n\ntarget_link_libraries( ${LIBRARY_NAME}\n        ${YAML_CPP_LIBRARIES}\n        ${MPPI_HEADER_LIBRARY_NAME}\n        )\n\nSET (_soversion ${LIB_MAJOR}.${LIB_MINOR}.${LIB_RELEASE})\n\ninstall(TARGETS ${LIBRARY_NAME}\n        # IMPORTANT: Add the library to the \"export-set\"\n        EXPORT ${PROJECT_NAME}-targets\n        RUNTIME DESTINATION bin\n        LIBRARY DESTINATION lib/${CMAKE_PROJECT_NAME}\n        )\n"
  },
  {
    "path": "src/controllers/double_integrator/double_integrator_mppi.cu",
    "content": "#include <mppi/instantiations/double_integrator_mppi/double_integrator_mppi.cuh>\n\n/*\n * This file contains the instantiations of the controller for the double integrator.\n * Will have a dynamics model of the double integrator, some cost function,\n * and controllers for both vanilla MPPI and Tube-MPPI.\n */\n// Num_timesteps, num_rollouts, blockdim x, blockdim y\ntypedef mppi::sampling_distributions::GaussianDistribution<DoubleIntegratorDynamics::DYN_PARAMS_T> Sampler;\n\ntemplate class VanillaMPPIController<DoubleIntegratorDynamics, DoubleIntegratorCircleCost,\n                                     DDPFeedback<DoubleIntegratorDynamics, 100>, 100, 512, Sampler>;\ntemplate class VanillaMPPIController<DoubleIntegratorDynamics, DoubleIntegratorCircleCost,\n                                     DDPFeedback<DoubleIntegratorDynamics, 50>, 50, 1024, Sampler>;\n\ntemplate class TubeMPPIController<DoubleIntegratorDynamics, DoubleIntegratorCircleCost,\n                                  DDPFeedback<DoubleIntegratorDynamics, 100>, 100, 512, Sampler>;\ntemplate class TubeMPPIController<DoubleIntegratorDynamics, DoubleIntegratorCircleCost,\n                                  DDPFeedback<DoubleIntegratorDynamics, 50>, 50, 1024, Sampler>;\ntemplate class TubeMPPIController<DoubleIntegratorDynamics, DoubleIntegratorCircleCost,\n                                  DDPFeedback<DoubleIntegratorDynamics, 100>, 100, 1024, Sampler>;\n"
  },
  {
    "path": "src/controllers/quadrotor/CMakeLists.txt",
    "content": "set(LIBRARY_NAME quadrotor_mppi)\nSET (LIB_MAJOR 0)\nSET (LIB_MINOR 0)\nSET (LIB_RELEASE 1)\n\nadd_library(${LIBRARY_NAME} SHARED\n        quadrotor_mppi.cu\n        )\n\ntarget_link_libraries( ${LIBRARY_NAME}\n        ${YAML_CPP_LIBRARIES}\n        ${MPPI_HEADER_LIBRARY_NAME}\n        )\n\nSET (_soversion ${LIB_MAJOR}.${LIB_MINOR}.${LIB_RELEASE})\n\ninstall(TARGETS ${LIBRARY_NAME}\n        # IMPORTANT: Add the library to the \"export-set\"\n        EXPORT ${PROJECT_NAME}-targets\n        RUNTIME DESTINATION bin\n        LIBRARY DESTINATION lib/${CMAKE_PROJECT_NAME}\n        )\n"
  },
  {
    "path": "src/controllers/quadrotor/quadrotor_mppi.cu",
    "content": "#include <mppi/instantiations/quadrotor_mppi/quadrotor_mppi.cuh>\n\ntypedef mppi::sampling_distributions::GaussianDistribution<QuadrotorDynamics::DYN_PARAMS_T> Sampler;\n\ntemplate class VanillaMPPIController<QuadrotorDynamics, QuadrotorQuadraticCost, DDPFeedback<QuadrotorDynamics, 100>,\n                                     100, 512, Sampler>;\ntemplate class VanillaMPPIController<QuadrotorDynamics, QuadrotorMapCost, DDPFeedback<QuadrotorDynamics, 100>, 100, 512,\n                                     Sampler>;\n"
  },
  {
    "path": "tests/CMakeLists.txt",
    "content": "# Add project includes and cuda includes for g++\n\n#include test utils\ninclude_directories(include)\n\n# Create header file pointing to test neural network\nset(HEADER_PATH ${CMAKE_CURRENT_SOURCE_DIR}/templated_headers/)\n\n# Create header file pointing to test map\nset(TEST_MAP_FOLDER \"${PROJECT_BINARY_DIR}\")\nset(TEST_NETWORK_FOLDER \"${PROJECT_BINARY_DIR}\")\n\nif(MPPI_BUILD_TESTS)\n    message(\"\")\n    find_package(PythonInterp)\n\n    # Create neural network map\n    execute_process(COMMAND ${PYTHON_EXECUTABLE}\n            ${PROJECT_SOURCE_DIR}/scripts/autorally/test/generateTestNetwork.py\n            -o ${TEST_NETWORK_FOLDER}\n            OUTPUT_FILE /dev/null\n            )\n    message(STATUS \"Created ${TEST_NETWORK_FOLDER}/*.npz\")\n    message(\"\")\nendif()\n\nif(MPPI_BUILD_TESTS)\n    message(\"\")\n    find_package(PythonInterp)\n\n    # Create test map\n    execute_process(COMMAND ${PYTHON_EXECUTABLE}\n            ${PROJECT_SOURCE_DIR}/scripts/autorally/test/generateTestMaps.py\n            -o ${TEST_MAP_FOLDER}/\n            OUTPUT_FILE /dev/null\n            )\n    message(STATUS \"Created folder ${TEST_MAP_FOLDER}/*.npz\")\n    message(\"\")\nendif()\n\n\nfile(GLOB HEADERS ${HEADER_PATH}*)\nforeach(T_FILE IN LISTS HEADERS)\n    get_filename_component(T_NAME ${T_FILE} NAME_WE)\n    string(REPLACE \".in\" \"\" STRIPPED_NAME ${T_NAME})\n    message(\"stripped name ${STRIPPED_NAME}\")\n    set(TEST_NETWORK_HEADER_FILE ${PROJECT_BINARY_DIR}/include/${STRIPPED_NAME}.h)\n    configure_file(${T_FILE} ${TEST_NETWORK_HEADER_FILE})\n    message(STATUS \"Generated ${TEST_NETWORK_HEADER_FILE}\")\nendforeach()\n\n\n# Add the generated header files to list of 
includes\ninclude_directories(${PROJECT_BINARY_DIR}/include)\nmessage(\"\")\n\nadd_subdirectory(dynamics)\nadd_subdirectory(cost_functions)\nadd_subdirectory(feedback_controllers)\nadd_subdirectory(mppi_core)\nadd_subdirectory(controllers)\nadd_subdirectory(misc)\nadd_subdirectory(sampling_distributions)\nadd_subdirectory(shaping_functions)\nadd_subdirectory(math_utils)\nadd_subdirectory(integration)\nadd_subdirectory(texture_helpers)\nadd_subdirectory(nn_helpers)\n"
  },
  {
    "path": "tests/controllers/CMakeLists.txt",
    "content": "set(GTEST_LIBRARIES gtest gmock gtest_main)\nfile(GLOB TARGET_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cu)\n\nforeach(T_FILE IN LISTS TARGET_SRCS)\n    get_filename_component(T_NAME ${T_FILE} NAME_WE)\n    add_executable(${T_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp ${T_FILE})\n    target_link_libraries(${T_NAME}\n            ${GTEST_LIBRARIES}\n            ${MPPI_HEADER_LIBRARY_NAME})\n    #  add_test(NAME ${T_NAME} COMMAND ${T_NAME})\n    gtest_add_tests(TARGET ${T_NAME})\n    set_target_properties(${T_NAME} PROPERTIES FOLDER test)\nendforeach()\n"
  },
  {
    "path": "tests/controllers/controller_generic_tests.cu",
    "content": "#include <gmock/gmock.h>\n#include <gtest/gtest.h>\n#include <mppi/controllers/controller.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n#include <mppi/utils/test_helper.h>\n#include <mppi_test/mock_classes/mock_costs.h>\n#include <mppi_test/mock_classes/mock_dynamics.h>\n\n#include <algorithm>\n#include <numeric>\n#include <random>\n\nstatic const int number_rollouts = 1200;\nstatic const int NUM_TIMESTEPS = 100;\nusing FEEDBACK_T = DDPFeedback<MockDynamics, NUM_TIMESTEPS>;\nconst dim3 rolloutDim(1, 2, 1);\nusing SAMPLER_T = mppi::sampling_distributions::GaussianDistribution<MockDynamics::DYN_PARAMS_T>;\n\nclass TestController : public Controller<MockDynamics, MockCost, FEEDBACK_T, SAMPLER_T, NUM_TIMESTEPS, number_rollouts>\n{\npublic:\n  typedef Controller<MockDynamics, MockCost, FEEDBACK_T, SAMPLER_T, NUM_TIMESTEPS, number_rollouts> PARENT_CLASS;\n  using PARAMS_T = PARENT_CLASS::TEMPLATED_PARAMS;\n\n  TestController(MockDynamics* model, MockCost* cost, FEEDBACK_T* fb_controller, SAMPLER_T* sampler, float dt,\n                 int max_iter, float lambda, float alpha, int num_timesteps = 100,\n                 const Eigen::Ref<const control_trajectory>& init_control_traj = control_trajectory::Zero(),\n                 cudaStream_t stream = nullptr)\n    : PARENT_CLASS(model, cost, fb_controller, sampler, dt, max_iter, lambda, alpha, num_timesteps, init_control_traj,\n                   stream)\n  {\n    // Allocate CUDA memory for the controller\n    allocateCUDAMemoryHelper(0);\n  }\n\n  TestController(MockDynamics* model, MockCost* cost, FEEDBACK_T* fb_controller, SAMPLER_T* sampler, PARAMS_T& params,\n                 cudaStream_t stream = nullptr)\n    : PARENT_CLASS(model, cost, fb_controller, sampler, params, stream)\n  {\n    // Allocate CUDA memory for the controller\n    allocateCUDAMemoryHelper(0);\n  }\n\n  virtual void computeControl(const Eigen::Ref<const 
state_array>& state, int optimization_stride) override\n  {\n  }\n\n  virtual void calculateSampledStateTrajectories() override{};\n\n  void computeControl(const Eigen::Ref<const state_array>& state,\n                      const std::array<control_trajectory, number_rollouts> noise)\n  {\n    int trajectory_size = control_trajectory().size();\n    for (int i = 0; i < number_rollouts; i++)\n    {\n      HANDLE_ERROR(cudaMemcpyAsync(this->sampler_->getControlSample(i, 0, 0), noise[i].data(),\n                                   sizeof(float) * trajectory_size, cudaMemcpyHostToDevice, stream_));\n    }\n    HANDLE_ERROR(cudaStreamSynchronize(stream_));\n    // Normally rolloutKernel would be called here and would transform\n    //  control_noise_d_ from u to u + noise\n\n    // Instead we just get back noise in this test\n    this->copySampledControlFromDevice();\n  }\n\n  virtual void slideControlSequence(int steps) override\n  {\n  }\n\n  cudaStream_t getStream()\n  {\n    return stream_;\n  }\n};\n\nclass ControllerTests : public ::testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    mockDynamics = new MockDynamics();\n    mockCost = new MockCost();\n    mockFeedback = new FEEDBACK_T(mockDynamics, dt);\n    sampler = new SAMPLER_T();\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n    MockDynamics tmp_dynamics();\n\n    // expect double check rebind\n    EXPECT_CALL(*mockCost, bindToStream(testing::_)).Times(1);\n    EXPECT_CALL(*mockDynamics, bindToStream(testing::_)).Times(1);\n    // EXPECT_CALL(mockFeedback, bindToStream(stream)).Times(1);\n\n    // expect GPU setup called again\n    EXPECT_CALL(*mockCost, GPUSetup()).Times(1);\n    EXPECT_CALL(*mockDynamics, GPUSetup()).Times(1);\n    // EXPECT_CALL(mockFeedback, GPUSetup()).Times(1);\n\n    controller = new TestController(mockDynamics, mockCost, mockFeedback, sampler, dt, max_iter, lambda, alpha);\n    auto controller_params = controller->getParams();\n    controller_params.dynamics_rollout_dim_ = 
rolloutDim;\n    controller_params.cost_rollout_dim_ = rolloutDim;\n    controller->setParams(controller_params);\n  }\n  void TearDown() override\n  {\n    delete controller;\n    delete mockDynamics;\n    delete mockCost;\n    delete mockFeedback;\n    delete sampler;\n  }\n\n  MockDynamics* mockDynamics;\n  MockCost* mockCost;\n  FEEDBACK_T* mockFeedback;\n  SAMPLER_T* sampler;\n  TestController* controller;\n\n  float dt = 0.1;\n  int max_iter = 1;\n  float lambda = 1.2;\n  float alpha = 0.1;\n  cudaStream_t stream;\n};\n\nTEST_F(ControllerTests, ConstructorDestructor)\n{\n  int num_timesteps = 10;\n\n  TestController::control_trajectory init_control_trajectory = TestController::control_trajectory::Ones();\n\n  // expect double check rebind\n  EXPECT_CALL(*mockCost, bindToStream(stream)).Times(1);\n  EXPECT_CALL(*mockDynamics, bindToStream(stream)).Times(1);\n  // // EXPECT_CALL(mockFeedback, bindToStream(stream)).Times(1);\n\n  // // expect GPU setup called again\n  EXPECT_CALL(*mockCost, GPUSetup()).Times(1);\n  EXPECT_CALL(*mockDynamics, GPUSetup()).Times(1);\n  // EXPECT_CALL(mockFeedback, GPUSetup()).Times(1);\n  TestController* controller_test = new TestController(mockDynamics, mockCost, mockFeedback, sampler, dt, max_iter,\n                                                       lambda, alpha, num_timesteps, init_control_trajectory, stream);\n\n  EXPECT_EQ(controller_test->model_, mockDynamics);\n  EXPECT_EQ(controller_test->cost_, mockCost);\n  EXPECT_EQ(controller_test->getDt(), dt);\n  EXPECT_EQ(controller_test->getNumIters(), max_iter);\n  EXPECT_EQ(controller_test->getLambda(), lambda);\n  EXPECT_EQ(controller_test->getAlpha(), alpha);\n  EXPECT_EQ(controller_test->getNumTimesteps(), num_timesteps);\n  EXPECT_EQ(controller_test->getControlSeq(), init_control_trajectory);\n  EXPECT_EQ(controller_test->getStream(), stream);\n  EXPECT_EQ(controller_test->getFeedbackEnabled(), false);\n\n  // TODO check that a random seed was set and stream was set\n  // 
EXPECT_NE(controller_test->getRandomSeed(), 0);\n\n  // TODO check for correct defaults\n  delete controller_test;\n}\n\nTEST_F(ControllerTests, ParamBasedConstructor)\n{\n  int num_timesteps = 10;\n  TestController::TEMPLATED_PARAMS controller_params;\n  controller_params.num_timesteps_ = num_timesteps;\n  controller_params.dt_ = dt;\n  controller_params.num_iters_ = max_iter;\n  controller_params.lambda_ = lambda;\n  controller_params.alpha_ = alpha;\n  controller_params.init_control_traj_ = TestController::control_trajectory::Ones();\n\n  // expect double check rebind\n  EXPECT_CALL(*mockCost, bindToStream(stream)).Times(1);\n  EXPECT_CALL(*mockDynamics, bindToStream(stream)).Times(1);\n  // // EXPECT_CALL(mockFeedback, bindToStream(stream)).Times(1);\n\n  // // expect GPU setup called again\n  EXPECT_CALL(*mockCost, GPUSetup()).Times(1);\n  EXPECT_CALL(*mockDynamics, GPUSetup()).Times(1);\n  // EXPECT_CALL(mockFeedback, GPUSetup()).Times(1);\n  TestController* controller_test =\n      new TestController(mockDynamics, mockCost, mockFeedback, sampler, controller_params, stream);\n\n  EXPECT_EQ(controller_test->model_, mockDynamics);\n  EXPECT_EQ(controller_test->cost_, mockCost);\n  EXPECT_EQ(controller_test->getDt(), dt);\n  EXPECT_EQ(controller_test->getNumIters(), max_iter);\n  EXPECT_EQ(controller_test->getLambda(), lambda);\n  EXPECT_EQ(controller_test->getAlpha(), alpha);\n  EXPECT_EQ(controller_test->getNumTimesteps(), num_timesteps);\n  EXPECT_EQ(controller_test->getControlSeq(), controller_params.init_control_traj_);\n  EXPECT_EQ(controller_test->getStream(), stream);\n  EXPECT_EQ(controller_test->getFeedbackEnabled(), false);\n\n  // TODO check that a random seed was set and stream was set\n  // EXPECT_NE(controller_test->getRandomSeed(), 0);\n\n  // TODO check for correct defaults\n  delete controller_test;\n}\n\nTEST_F(ControllerTests, setNumTimesteps)\n{\n  controller->setNumTimesteps(10);\n  EXPECT_EQ(controller->getNumTimesteps(), 10);\n\n  
controller->setNumTimesteps(1000);\n  EXPECT_EQ(controller->getNumTimesteps(), 100);\n}\n\nTEST_F(ControllerTests, smoothControlTrajectory)\n{\n  TestController::control_trajectory u;\n  u.col(0) = TestController::control_array::Ones();\n  u.col(1) = 2.0 * TestController::control_array::Ones();\n  Eigen::Matrix<float, MockDynamics::CONTROL_DIM, 2> control_history =\n      Eigen::Matrix<float, MockDynamics::CONTROL_DIM, 2>::Zero();\n\n  // smooth using only one value from the control trajectory\n  controller->setNumTimesteps(1);\n  controller->smoothControlTrajectoryHelper(u, control_history);\n  for (int i = 0; i < MockDynamics::CONTROL_DIM; i++)\n  {\n    EXPECT_FLOAT_EQ(u(i, 0), (1.0 * 17 + 1.0 * 12 + 1.0 * -3) / 35.0) << \"i = \" << i;\n  }\n  // Reset trajectory and smooth over two steps\n  u.col(0) = TestController::control_array::Ones();\n  controller->setNumTimesteps(2);\n  controller->smoothControlTrajectoryHelper(u, control_history);\n  for (int i = 0; i < MockDynamics::CONTROL_DIM; i++)\n  {\n    EXPECT_FLOAT_EQ(u(i, 0), (1.0 * 17 + 2.0 * 12 + 2.0 * -3) / 35.0) << \"i = \" << i;\n    EXPECT_FLOAT_EQ(u(i, 1), (1.0 * 12 + 2.0 * 17 + 2.0 * 12 + 2.0 * -3) / 35.0) << \"i = \" << i;\n  }\n}\n\nTEST_F(ControllerTests, slideControlSequenceHelper)\n{\n  TestController::control_trajectory u;\n  for (int i = 0; i < controller->getNumTimesteps(); i++)\n  {\n    TestController::control_array control = TestController::control_array::Ones();\n    control = control * i;\n    u.col(i) = control.transpose();\n  }\n\n  controller->slideControlSequenceHelper(1, u);\n  for (int i = 0; i < controller->getNumTimesteps(); i++)\n  {\n    for (int j = 0; j < MockDynamics::CONTROL_DIM; j++)\n    {\n      int val = std::min(i + 1, controller->getNumTimesteps() - 1);\n      if (i + 1 > controller->getNumTimesteps() - 1)\n      {\n        EXPECT_FLOAT_EQ(u(j, i), 0);\n      }\n      else\n      {\n        EXPECT_FLOAT_EQ(u(j, i), val);\n      }\n    }\n  }\n\n  
controller->slideControlSequenceHelper(10, u);\n  for (int i = 0; i < controller->getNumTimesteps(); i++)\n  {\n    for (int j = 0; j < MockDynamics::CONTROL_DIM; j++)\n    {\n      int val = std::min(i + 11, controller->getNumTimesteps() - 1);\n      if (i + 10 > controller->getNumTimesteps() - 2)\n      {\n        EXPECT_FLOAT_EQ(u(j, i), 0);\n      }\n      else\n      {\n        EXPECT_FLOAT_EQ(u(j, i), val);\n      }\n    }\n  }\n}\n\nTEST_F(ControllerTests, computeStateTrajectoryHelper)\n{\n  TestController::state_array x = TestController::state_array::Ones();\n  TestController::state_array xdot = TestController::state_array::Ones();\n  EXPECT_CALL(*mockDynamics, step(testing::_, testing::_, testing::_, testing::_, testing::_, testing::_, dt))\n      .Times(controller->getNumTimesteps() - 1);\n\n  TestController::state_trajectory result = TestController::state_trajectory::Ones();\n  TestController::control_trajectory u = TestController::control_trajectory::Zero();\n  controller->computeStateTrajectoryHelper(result, x, u);\n\n  // TODO: Figure out if we can actually check output if output is not part of input\n  // for (int i = 0; i < controller->getNumTimesteps(); i++)\n  // {\n  //   for (int j = 0; j < MockDynamics::STATE_DIM; j++)\n  //   {\n  //     EXPECT_FLOAT_EQ(result(j, i), 1.0) << \"t: \" << i << \", s_dim: \" << j << \"\\n\";\n  //   }\n  // }\n}\n\nTEST_F(ControllerTests, interpolateControl)\n{\n  TestController::control_trajectory traj;\n  for (int i = 0; i < controller->getNumTimesteps(); i++)\n  {\n    traj.col(i) = TestController::control_array::Ones() * i;\n  }\n\n  for (double i = 0; i < controller->getNumTimesteps() - 1; i += 0.25)\n  {\n    TestController::control_array result = controller->interpolateControls(i * controller->getDt(), traj);\n    EXPECT_FLOAT_EQ(result(0), i) << i;\n  }\n}\n\nTEST_F(ControllerTests, interpolateFeedback)\n{\n  controller->initFeedback();\n  auto fb_state = controller->getFeedbackState();\n  for (int i = 0; 
i < fb_state.FEEDBACK_SIZE; i++)\n  {\n    fb_state.fb_gain_traj_[i] = i;\n  }\n\n  TestController::state_trajectory s_traj = TestController::state_trajectory::Zero();\n\n  TestController::state_array state = TestController::state_array::Ones();\n  for (double i = 0; i < controller->getNumTimesteps() - 1; i += 0.25)\n  {\n    TestController::state_array interpolated_state = controller->interpolateState(s_traj, i * controller->getDt());\n    TestController::control_array result =\n        controller->interpolateFeedback(state, interpolated_state, i * controller->getDt(), fb_state);\n    EXPECT_FLOAT_EQ(result(0), i);\n  }\n}\n\nTEST_F(ControllerTests, getCurrentControlTest)\n{\n  EXPECT_CALL(*mockDynamics, enforceConstraints(testing::_, testing::_)).Times(4 * (controller->getNumTimesteps() - 1));\n\n  TestController::control_trajectory traj;\n  controller->initFeedback();\n  auto fb_state = controller->getFeedbackState();\n  for (int i = 0; i < controller->getNumTimesteps(); i++)\n  {\n    for (int j = 0; j < MockDynamics::STATE_DIM * MockDynamics::CONTROL_DIM; j++)\n    {\n      int i_index = i * MockDynamics::STATE_DIM * MockDynamics::CONTROL_DIM;\n      fb_state.fb_gain_traj_[i_index + j] = i_index + j;\n    }\n    traj.col(i) = TestController::control_array::Ones() * i;\n  }\n\n  TestController::state_trajectory s_traj = TestController::state_trajectory::Zero();\n\n  TestController::state_array state = TestController::state_array::Ones();\n  for (double i = 0; i < controller->getNumTimesteps() - 1; i += 0.25)\n  {\n    TestController::state_array interpolated_state = controller->interpolateState(s_traj, i * controller->getDt());\n    TestController::control_array result =\n        controller->getCurrentControl(state, i * controller->getDt(), interpolated_state, traj, fb_state);\n    EXPECT_FLOAT_EQ(result(0), i * 2);\n  }\n}\n\nTEST_F(ControllerTests, saveControlHistoryHelper_1)\n{\n  int steps = 1;\n  TestController::control_trajectory u = 
TestController::control_trajectory::Random();\n  Eigen::Matrix<float, MockDynamics::CONTROL_DIM, 2> u_history;\n  u_history.setOnes();\n\n  controller->saveControlHistoryHelper(steps, u, u_history);\n\n  for (int i = 0; i < MockDynamics::CONTROL_DIM; ++i)\n  {\n    EXPECT_FLOAT_EQ(u_history(i, 0), 1.0f) << \"History column 0 failed\";\n    EXPECT_FLOAT_EQ(u_history(i, 1), u(i, steps - 1)) << \"History column 1 failed\";\n  }\n}\n\nTEST_F(ControllerTests, saveControlHistoryHelper_2)\n{\n  int steps = 4;\n  TestController::control_trajectory u = TestController::control_trajectory::Random();\n  Eigen::Matrix<float, MockDynamics::CONTROL_DIM, 2> u_history;\n  u_history.setOnes();\n\n  controller->saveControlHistoryHelper(steps, u, u_history);\n\n  for (int i = 0; i < MockDynamics::CONTROL_DIM; ++i)\n  {\n    EXPECT_FLOAT_EQ(u_history(i, 0), u(i, steps - 2)) << \"History column 0 failed\";\n    EXPECT_FLOAT_EQ(u_history(i, 1), u(i, steps - 1)) << \"History column 1 failed\";\n  }\n}\n"
  },
  {
    "path": "tests/controllers/controller_kernel_testing.cu",
    "content": "#include <mppi/controllers/MPPI/mppi_controller.cuh>\n#include <mppi/controllers/ColoredMPPI/colored_mppi_controller.cuh>\n#include <mppi/controllers/Tube-MPPI/tube_mppi_controller.cuh>\n#include <mppi/controllers/R-MPPI/robust_mppi_controller.cuh>\n#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n#include <mppi/cost_functions/cost.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_circle_cost.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n\nclass DoubleIntegratorDummyCost\n  : public Cost<DoubleIntegratorDummyCost, DoubleIntegratorCircleCostParams, DoubleIntegratorParams>\n{\npublic:\n  DoubleIntegratorDummyCost(cudaStream_t stream = 0)\n  {\n    bindToStream(stream);\n  }\n\n  __device__ float computeStateCost(float* s, int timestep, float* theta_c, int* crash_status)\n  {\n    float cost = SQ(s[0]);\n    for (int i = 0; i < 100; i++)\n    {\n      cost = abs(cos(cost));\n    }\n    return cost;\n  }\n\n  float computeStateCost(const Eigen::Ref<const output_array> s, int timestep, int* crash_status)\n  {\n    float cost = SQ(s[0]);\n    for (int i = 0; i < 100; i++)\n    {\n      cost = abs(cos(cost));\n    }\n    return cost;\n  }\n\n  float terminalCost(const Eigen::Ref<const output_array> s)\n  {\n    return 0;\n  }\n\n  __device__ float terminalCost(float* s, float* theta_c)\n  {\n    return 0;\n  }\n};\n\n#include <gtest/gtest.h>\n\nconst int MAX_TIMESTEPS = 200;\nusing DI_FEEDBACK_T = DDPFeedback<DoubleIntegratorDynamics, MAX_TIMESTEPS>;\n\ntemplate <int NUM_ROLLOUTS>\nusing DI_Vanilla = VanillaMPPIController<DoubleIntegratorDynamics, DoubleIntegratorDummyCost, DI_FEEDBACK_T,\n                                         MAX_TIMESTEPS, NUM_ROLLOUTS>;\n\ntemplate <int NUM_ROLLOUTS>\nusing DI_Colored = ColoredMPPIController<DoubleIntegratorDynamics, DoubleIntegratorDummyCost, DI_FEEDBACK_T,\n                                         MAX_TIMESTEPS, NUM_ROLLOUTS>;\n\ntemplate <int NUM_ROLLOUTS>\nusing DI_Tube 
=\n    TubeMPPIController<DoubleIntegratorDynamics, DoubleIntegratorDummyCost, DI_FEEDBACK_T, MAX_TIMESTEPS, NUM_ROLLOUTS>;\n\ntemplate <int NUM_ROLLOUTS>\nusing DI_Robust = RobustMPPIController<DoubleIntegratorDynamics, DoubleIntegratorDummyCost, DI_FEEDBACK_T,\n                                       MAX_TIMESTEPS, NUM_ROLLOUTS>;\n\n// TODO: Add more dynamics/cost function specializations\n\ntemplate <class CONTROLLER_T>\nclass ControllerKernelChoiceTest : public ::testing::Test\n{\npublic:\n  using DYN_T = typename CONTROLLER_T::TEMPLATED_DYNAMICS;\n  using COST_T = typename CONTROLLER_T::TEMPLATED_COSTS;\n  using FB_T = typename CONTROLLER_T::TEMPLATED_FEEDBACK;\n  using SAMPLER_T = typename CONTROLLER_T::TEMPLATED_SAMPLING;\n  using CONTROLLER_PARAMS_T = typename CONTROLLER_T::TEMPLATED_PARAMS;\n  using SAMPLER_PARAMS_T = typename SAMPLER_T::SAMPLING_PARAMS_T;\n\n  DYN_T* model = nullptr;\n  COST_T* cost = nullptr;\n  SAMPLER_T* sampler = nullptr;\n  FB_T* fb_controller = nullptr;\n  std::shared_ptr<CONTROLLER_T> controller;\n  mppi::util::MPPILoggerPtr logger = std::make_shared<mppi::util::MPPILogger>(mppi::util::LOG_LEVEL::DEBUG);\n\n  void SetUp() override\n  {\n    model = new DYN_T();\n    cost = new COST_T();\n    fb_controller = new FB_T(model, dt);\n\n    SAMPLER_PARAMS_T sampler_params;\n    for (int i = 0; i < DYN_T::CONTROL_DIM; i++)\n    {\n      sampler_params.std_dev[i] = std_dev;\n    }\n    sampler = new SAMPLER_T(sampler_params);\n\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n\n    CONTROLLER_PARAMS_T controller_params;\n    setUpControllerParams(controller_params);\n    controller = std::make_shared<CONTROLLER_T>(model, cost, fb_controller, sampler, controller_params, stream);\n    controller->setLogger(logger);\n    controller->setLogLevel(mppi::util::LOG_LEVEL::WARNING);\n  }\n\n  void setUpControllerParams(CONTROLLER_PARAMS_T& params)\n  {\n    params.dt_ = dt;\n    params.num_timesteps_ = num_timesteps;\n    params.dynamics_rollout_dim_ 
= rollout_dyn;\n    params.cost_rollout_dim_ = rollout_cost;\n  }\n\n  void TearDown() override\n  {\n    controller.reset();\n    delete fb_controller;\n    delete sampler;\n    delete model;\n    delete cost;\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n  }\n\n  cudaStream_t stream;\n\nprotected:\n  const int num_timesteps = 150;\n  float dt = 0.01;\n  // float lambda = 1.0;\n  // float alpha = 0.0;\n  float std_dev = 2.0;\n\n  dim3 rollout_dyn = dim3(64, DYN_T::STATE_DIM, 1);\n  dim3 rollout_cost = dim3(96, 1, 1);\n  dim3 eval_dyn = dim3(64, DYN_T::STATE_DIM, 1);\n  dim3 eval_cost = dim3(96, 1, 1);\n};\n\ntemplate <int NUM_ROLLOUTS>\nusing DIFFERENT_CONTROLLERS =\n    ::testing::Types<DI_Vanilla<NUM_ROLLOUTS>, DI_Colored<NUM_ROLLOUTS>, DI_Tube<NUM_ROLLOUTS>, DI_Robust<NUM_ROLLOUTS>,\n                     DI_Vanilla<NUM_ROLLOUTS * 4>, DI_Colored<NUM_ROLLOUTS * 4>, DI_Tube<NUM_ROLLOUTS * 4>,\n                     DI_Robust<NUM_ROLLOUTS * 4>, DI_Vanilla<NUM_ROLLOUTS * 16>, DI_Colored<NUM_ROLLOUTS * 16>,\n                     DI_Tube<NUM_ROLLOUTS * 16>, DI_Robust<NUM_ROLLOUTS * 16>, DI_Vanilla<NUM_ROLLOUTS * 64>,\n                     DI_Colored<NUM_ROLLOUTS * 64>, DI_Tube<NUM_ROLLOUTS * 64>, DI_Robust<NUM_ROLLOUTS * 64>>;\n\nTYPED_TEST_SUITE(ControllerKernelChoiceTest, DIFFERENT_CONTROLLERS<128>);\n\nTYPED_TEST(ControllerKernelChoiceTest, CheckAppropriateKernelSelection)\n{\n  const int further_evaluations = 20;\n\n  auto empty_state = this->model->getZeroState();\n  this->controller->setLogLevel(mppi::util::LOG_LEVEL::DEBUG);\n  this->controller->chooseAppropriateKernel();\n  auto auto_kernel_choice = this->controller->getKernelChoiceAsEnum();\n\n  // Start testing single kernel\n  this->controller->setKernelChoice(kernelType::USE_SINGLE_KERNEL);\n  auto start_single_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < further_evaluations; i++)\n  {\n    this->controller->computeControl(empty_state, 1);\n  }\n  auto end_single_kernel_time = 
std::chrono::steady_clock::now();\n\n  // Start testing split kernel\n  this->controller->setKernelChoice(kernelType::USE_SPLIT_KERNELS);\n  auto start_split_kernel_time = std::chrono::steady_clock::now();\n  for (int i = 0; i < further_evaluations; i++)\n  {\n    this->controller->computeControl(empty_state, 1);\n  }\n  auto end_split_kernel_time = std::chrono::steady_clock::now();\n\n  float single_kernel_duration = mppi::math::timeDiffms(end_single_kernel_time, start_single_kernel_time);\n  float split_kernel_duration = mppi::math::timeDiffms(end_split_kernel_time, start_split_kernel_time);\n\n  if (fabsf(single_kernel_duration - split_kernel_duration) < 1.0f)\n  {  // the kernels are within 1 ms of each other\n    this->logger->info(\"Durations of both kernels too close to determine winner: split = %f ms, single = %f ms\\n\",\n                       split_kernel_duration, single_kernel_duration);\n  }\n  else\n  {\n    ASSERT_EQ(split_kernel_duration <= single_kernel_duration, auto_kernel_choice == kernelType::USE_SPLIT_KERNELS)\n        << \"chooseAppropriateKernel() did not choose the faster kernel, single: \" << single_kernel_duration\n        << \" ms, split: \" << split_kernel_duration;\n  }\n}\n\nTYPED_TEST(ControllerKernelChoiceTest, KernelChoiceThroughSharedMemCheck)\n{\n  auto controller_params = this->controller->getParams();\n  // set Rollout Cost dim so high as to ensure the kernel would run out of shared mem if used\n  controller_params.cost_rollout_dim_.x = 320000;\n  this->controller->setParams(controller_params);\n  this->controller->chooseAppropriateKernel();\n  ASSERT_EQ(this->controller->getKernelChoiceAsEnum(), kernelType::USE_SINGLE_KERNEL);\n}\n\nTYPED_TEST(ControllerKernelChoiceTest, NoUsableKernelCheck)\n{\n  auto controller_params = this->controller->getParams();\n  // set Rollout Cost dim so high as to ensure the kernel would run out of shared mem if used\n  controller_params.cost_rollout_dim_.x = 320000;\n  
controller_params.dynamics_rollout_dim_.x = 320000;\n  this->controller->setParams(controller_params);\n  EXPECT_ANY_THROW({ this->controller->chooseAppropriateKernel(); });\n}\n\nTYPED_TEST(ControllerKernelChoiceTest, MoreEvaluationsDoNotAdjustChoice)\n{\n  int num_evaluations = this->controller->getNumKernelEvaluations();\n  this->controller->setLogLevel(mppi::util::LOG_LEVEL::DEBUG);\n  this->controller->chooseAppropriateKernel();\n  auto curr_select_kernel = this->controller->getKernelChoiceAsEnum();\n\n  float prev_num_eval_time = 0;\n\n  for (int i = 0; i < 5; i++)\n  {\n    this->controller->setNumKernelEvaluations(num_evaluations);\n    auto start_num_eval_time = std::chrono::steady_clock::now();\n    this->controller->chooseAppropriateKernel();\n    auto end_num_eval_time = std::chrono::steady_clock::now();\n    float num_eval_time = mppi::math::timeDiffms(end_num_eval_time, start_num_eval_time);\n    ASSERT_EQ(this->controller->getKernelChoiceAsEnum(), curr_select_kernel);\n    if (i != 0)\n    {\n      ASSERT_TRUE(prev_num_eval_time < num_eval_time);\n    }\n    prev_num_eval_time = num_eval_time;\n    num_evaluations *= 2;\n  }\n}\n"
  },
  {
    "path": "tests/controllers/rmppi_test.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/controllers/R-MPPI/robust_mppi_controller.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_circle_cost.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_robust_cost.cuh>\n#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n\n#include <cnpy.h>\n#include <random>  // Used to generate random noise for control trajectories\n/******************************************************************************\n * Test class for RobustControllerPrivateMethods\n ******************************************************************************/\nconst int NUM_TIMESTEPS = 100;\nusing DYN = DoubleIntegratorDynamics;\nusing COST = DoubleIntegratorCircleCost;\nusing SAMPLING = mppi::sampling_distributions::GaussianDistribution<DYN::DYN_PARAMS_T>;\nusing SAMPLER_PARAMS = SAMPLING::SAMPLING_PARAMS_T;\nusing FB_CONTROLLER = DDPFeedback<DYN, NUM_TIMESTEPS>;\nusing CONTROLLER_T = RobustMPPIController<DYN, COST, FB_CONTROLLER, NUM_TIMESTEPS, 2048, SAMPLING>;\n\nclass TestRobust : public CONTROLLER_T\n{\npublic:\n  TestRobust(DYN* model, COST* cost, DDPFeedback<DYN, NUM_TIMESTEPS>* fb_controller, SAMPLING* sampler, float dt,\n             int max_iter, float lambda, float alpha, float value_function_threshold, int num_timesteps,\n             const Eigen::Ref<const control_trajectory>& init_control_traj, cudaStream_t stream)\n    : RobustMPPIController(model, cost, fb_controller, sampler, dt, max_iter, lambda, alpha, value_function_threshold,\n                           num_timesteps, init_control_traj, 9, 1, stream)\n  {\n  }\n  TestRobust(DYN* model, COST* cost, DDPFeedback<DYN, NUM_TIMESTEPS>* fb_controller, SAMPLING* sampler,\n             RobustMPPIController::TEMPLATED_PARAMS& params, cudaStream_t stream)\n    : RobustMPPIController(model, cost, fb_controller, sampler, 
params, stream)\n  {\n  }\n\n  // Test to make sure that its nonzero\n  // Test to make sure that cuda memory is allocated\n  NominalCandidateVector getCandidates(const Eigen::Ref<const state_array>& nominal_x_k,\n                                       const Eigen::Ref<const state_array>& nominal_x_kp1,\n                                       const Eigen::Ref<const state_array>& real_x_kp1)\n  {\n    getInitNominalStateCandidates(nominal_x_k, nominal_x_kp1, real_x_kp1);\n    return candidate_nominal_states_;\n  };\n\n  Eigen::MatrixXf getWeights()\n  {\n    return line_search_weights_;\n  };\n\n  void updateCandidates(int value)\n  {\n    updateNumCandidates(value);\n  }\n\n  bool getCudaMemStatus()\n  {\n    return importance_sampling_cuda_mem_init_;\n  }\n\n  void deallocateNSCMemory()\n  {\n    deallocateNominalStateCandidateMemory();\n  }\n\n  void resetNSCMemory()\n  {\n    resetCandidateCudaMem();\n  }\n\n  Eigen::MatrixXi getStrideIS(int stride)\n  {\n    computeImportanceSamplerStride(stride);\n    return importance_sampler_strides_;\n  }\n\n  float getComputeCandidateBaseline(const Eigen::Ref<const Eigen::MatrixXf>& traj_costs_in)\n  {\n    candidate_trajectory_costs_ = traj_costs_in;\n    return computeCandidateBaseline();\n  }\n\n  int getComputeBestIndex(const Eigen::Ref<const Eigen::MatrixXf>& traj_costs_in)\n  {\n    candidate_trajectory_costs_ = traj_costs_in;\n    computeBestIndex();\n    return best_index_;\n  }\n\n  state_array getNominalStateFromOptimization(const Eigen::Ref<const state_array>& nominal_x_k,\n                                              const Eigen::Ref<const state_array>& nominal_x_kp1,\n                                              const Eigen::Ref<const state_array>& real_x_kp1, bool nominal_state_init)\n  {\n    nominal_state_trajectory_.col(0) = nominal_x_k;\n    nominal_state_trajectory_.col(1) = nominal_x_kp1;\n    nominal_state_init_ = nominal_state_init;\n    computeNominalStateAndStride(real_x_kp1, 1);  // Default the 
stride to 1\n    return nominal_state_;\n  }\n\n  float* getFeedbackGainVector()\n  {\n    return this->fb_controller_->getFeedbackState().fb_gain_traj_;\n  }\n\n  TEMPLATED_FEEDBACK::feedback_gain_trajectory getFeedbackGainsEigen()\n  {\n    return this->fb_controller_->getFeedbackGainsEigen();\n  }\n};\n\n// Text fixture for nominal state selection\nclass RMPPINominalStateCandidates : public ::testing::Test\n{\npublic:\nprotected:\n  void SetUp() override\n  {\n    model = new DYN(10);  // Initialize the double integrator dynamics\n    cost = new COST();    // Initialize the cost function\n    fb_controller = new FB_CONTROLLER(model, dt);\n    SAMPLER_PARAMS sampler_params;\n    sampler_params.std_dev[0] = 0.001;\n    sampler_params.std_dev[1] = 0.001;\n    sampler = new SAMPLING(sampler_params);\n    controller_params.dt_ = dt;\n    controller_params.lambda_ = lambda;\n    controller_params.alpha_ = alpha;\n    // controller_params.control_std_dev_ << 0.0001, 0.0001;\n    controller_params.num_iters_ = 3;\n    controller_params.value_function_threshold_ = 1000.0;\n    controller_params.num_timesteps_ = 100;\n    controller_params.init_control_traj_.setZero();\n\n    controller_params.dynamics_rollout_dim_ = dim3(64, 4, 2);\n    controller_params.cost_rollout_dim_ = dim3(64, 4, 2);\n    controller_params.eval_dyn_kernel_dim_ = dim3(64, 4, 1);\n    controller_params.eval_cost_kernel_dim_ = dim3(controller_params.num_timesteps_, 1, 1);\n\n    // Q, Qf, R\n    auto fb_params = fb_controller->getParams();\n    fb_params.Q.setIdentity();\n    fb_params.Q_f.setIdentity();\n    fb_params.R.setIdentity();\n    fb_controller->setParams(fb_params);\n    test_controller = new TestRobust(model, cost, fb_controller, sampler, controller_params, 0);\n  }\n\n  void TearDown() override\n  {\n    delete test_controller;\n    delete model;\n    delete cost;\n    delete fb_controller;\n    delete sampler;\n  }\n\n  DYN* model;\n  COST* cost;\n  TestRobust* test_controller;\n  
FB_CONTROLLER* fb_controller;\n  SAMPLING* sampler;\n  TestRobust::TEMPLATED_PARAMS controller_params;\n  float dt = 0.01;\n  float lambda = 0.5;\n  float alpha = 0.01;\n};\n\nTEST_F(RMPPINominalStateCandidates, UpdateNumCandidates_LessThan3)\n{\n  ::testing::FLAGS_gtest_death_test_style = \"threadsafe\";\n  ASSERT_DEATH(test_controller->updateCandidates(1), \"ERROR: number of candidates must be greater or equal to 3\\n\");\n}\n\nTEST_F(RMPPINominalStateCandidates, UpdateNumCandidates_Negative)\n{\n  ::testing::FLAGS_gtest_death_test_style = \"threadsafe\";\n  ASSERT_DEATH(test_controller->updateCandidates(-1), \"ERROR: number of candidates must be greater or equal to 3\\n\");\n}\n\nTEST_F(RMPPINominalStateCandidates, UpdateNumCandidates_Even)\n{\n  ::testing::FLAGS_gtest_death_test_style = \"threadsafe\";\n  ASSERT_DEATH(test_controller->updateCandidates(4), \"ERROR: number of candidates must be odd\\n\");\n}\n\nTEST_F(RMPPINominalStateCandidates, UpdateNumCandidates_TooManyCandidates)\n{\n  ::testing::FLAGS_gtest_death_test_style = \"threadsafe\";\n  ASSERT_DEATH(test_controller->updateCandidates(99), \"cannot exceed\");\n}\n\nTEST_F(RMPPINominalStateCandidates, CandidateVectorSizeNonzero)\n{\n  DYN::state_array x_star_k, x_star_kp1, x_kp1;\n  x_star_k << -4, 0, 0, 0;\n  x_star_kp1 << 4, 0, 0, 0;\n  x_kp1 << 4, 4, 0, 0;\n  auto candidates = test_controller->getCandidates(x_star_k, x_star_kp1, x_kp1);\n  ASSERT_TRUE(candidates.size() > 0);\n}\n\nTEST_F(RMPPINominalStateCandidates, CudaMemoryInitialized)\n{\n  ASSERT_TRUE(test_controller->getCudaMemStatus());\n}\n\nTEST_F(RMPPINominalStateCandidates, CudaMemoryRemoved)\n{\n  test_controller->deallocateNSCMemory();\n  ASSERT_FALSE(test_controller->getCudaMemStatus());\n}\n\nTEST_F(RMPPINominalStateCandidates, CudaMemoryReset)\n{\n  test_controller->deallocateNSCMemory();  // Remove memory\n  ASSERT_FALSE(test_controller->getCudaMemStatus());\n  test_controller->resetNSCMemory();  // Should allocate\n  
ASSERT_TRUE(test_controller->getCudaMemStatus());\n  test_controller->resetNSCMemory();  // Should remove then allocate again\n  ASSERT_TRUE(test_controller->getCudaMemStatus());\n}\n\nTEST_F(RMPPINominalStateCandidates, LineSearchWeights_9)\n{\n  test_controller->updateCandidates(9);\n  auto controller_weights = test_controller->getWeights();\n  Eigen::MatrixXf known_weights(3, 9);\n  known_weights << 1, 3.0 / 4, 1.0 / 2, 1.0 / 4, 0, 0, 0, 0, 0, 0, 1.0 / 4, 1.0 / 2, 3.0 / 4, 1, 3.0 / 4, 1.0 / 2,\n      1.0 / 4, 0, 0, 0, 0, 0, 0, 1.0 / 4, 1.0 / 2, 3.0 / 4, 1;\n\n  //  std::cout << controller_weights << std::endl;\n  //  std::cout << known_weights << std::endl;\n  ASSERT_TRUE(controller_weights == known_weights) << \"Known Weights: \\n\"\n                                                   << known_weights << \"\\nComputed Weights: \\n\"\n                                                   << controller_weights;\n}\n\nTEST_F(RMPPINominalStateCandidates, ImportanceSampler_Stride_2)\n{\n  int stride = 2;\n  test_controller->updateCandidates(9);\n  Eigen::MatrixXi known_stride(1, 9);\n  known_stride << 0, 1, 1, 2, 2, 2, 2, 2, 2;\n  auto compute_stride = test_controller->getStrideIS(stride);\n  ASSERT_TRUE(known_stride == compute_stride) << \"Known Stride: \\n\"\n                                              << known_stride << \"\\nComputed Stride: \\n\"\n                                              << compute_stride;\n}\n\nTEST_F(RMPPINominalStateCandidates, ImportanceSampler_Stride_4)\n{\n  int stride = 4;\n  test_controller->updateCandidates(9);\n  Eigen::MatrixXi known_stride(1, 9);\n  known_stride << 0, 1, 2, 3, 4, 4, 4, 4, 4;\n  auto compute_stride = test_controller->getStrideIS(stride);\n  ASSERT_TRUE(known_stride == compute_stride) << \"Known Stride: \\n\"\n                                              << known_stride << \"\\nComputed Stride: \\n\"\n                                              << compute_stride;\n}\n\nTEST_F(RMPPINominalStateCandidates, 
InitEvalSelection_Weights)\n{\n  /*\n   * This test will ensure that the line search process to select the\n   * number of points for free energy evaluation is correct.\n   */\n  DYN::state_array x_star_k, x_star_kp1, x_kp1;\n  x_star_k << -4, 0, 0, 0;\n  x_star_kp1 << 0, 4, 0, 0;\n  x_kp1 << 4, 4, 0, 0;\n\n  const int num_candidates = 9;\n  test_controller->updateCandidates(num_candidates);\n\n  Eigen::Matrix<float, 4, num_candidates> known_candidates;\n  known_candidates << -4, -3, -2, -1, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n      0, 0, 0, 0, 0;\n\n  // Create the controller so we can test functions from it -> this should be a fixture\n  auto candidates = test_controller->getCandidates(x_star_k, x_star_kp1, x_kp1);\n\n  for (int i = 0; i < num_candidates; ++i)\n  {\n    ASSERT_TRUE(candidates[i] == known_candidates.col(i)) << \"Index: \" << i << \"\\nKnown Point: \\n\"\n                                                          << known_candidates.col(i) << \"\\nComputed Point: \\n\"\n                                                          << candidates[i];\n  }\n}\n\nclass RMPPINominalStateSelection : public ::testing::Test\n{\npublic:\n  int num_samples = 64;\n  int num_candidates = 9;\n  Eigen::MatrixXf trajectory_costs;\n\nprotected:\n  void SetUp() override\n  {\n    model = new DYN(10);  // Initialize the double integrator dynamics\n    cost = new COST;      // Initialize the cost function\n    fb_controller = new FB_CONTROLLER(model, dt);\n    SAMPLER_PARAMS sampler_params;\n    sampler_params.std_dev[0] = 0.001;\n    sampler_params.std_dev[1] = 0.001;\n    sampler = new SAMPLING(sampler_params);\n    init_control_traj.setZero();\n\n    // Q, Qf, R\n    auto fb_params = fb_controller->getParams();\n    fb_params.Q.setIdentity();\n    fb_params.Q_f.setIdentity();\n    fb_params.R.setIdentity();\n    fb_controller->setParams(fb_params);\n\n    test_controller =\n        new TestRobust(model, cost, fb_controller, 
sampler, dt, 3, lambda, alpha, 1000.0, 100, init_control_traj, 0);\n    auto controller_params = test_controller->getParams();\n    controller_params.dynamics_rollout_dim_ = dim3(64, 4, 2);\n    controller_params.cost_rollout_dim_ = dim3(64, 1, 2);\n    controller_params.eval_dyn_kernel_dim_ = dim3(num_samples, 4, 1);\n    controller_params.eval_cost_kernel_dim_ = dim3(controller_params.num_timesteps_, 1, 1);\n    test_controller->setParams(controller_params);\n    test_controller->chooseAppropriateKernel();\n\n    // Set the size of the trajectory costs function\n    trajectory_costs.resize(num_samples * num_candidates, 1);\n\n    // Fill the trajectory costs with random costs\n    trajectory_costs = Eigen::MatrixXf::Random(num_samples * num_candidates, 1);\n  }\n\n  void TearDown() override\n  {\n    delete test_controller;\n    delete cost;\n    delete model;\n    delete fb_controller;\n    delete sampler;\n  }\n\n  DYN* model;\n  COST* cost;\n  TestRobust* test_controller;\n  FB_CONTROLLER* fb_controller;\n  SAMPLING* sampler;\n  TestRobust::control_trajectory init_control_traj;\n  float dt = 0.01;\n  float lambda = 0.5;\n  float alpha = 0.01;\n};\n\nTEST_F(RMPPINominalStateSelection, GetCandidateBaseline)\n{\n  // Compute baseline\n  float baseline = trajectory_costs(0);\n  for (int i = 0; i < trajectory_costs.size(); i++)\n  {\n    if (trajectory_costs(i) < baseline)\n    {\n      baseline = trajectory_costs(i);\n    }\n  }\n\n  //\n  float compute_baseline = test_controller->getComputeCandidateBaseline(trajectory_costs);\n\n  ASSERT_FLOAT_EQ(baseline, compute_baseline);\n}\n\nTEST_F(RMPPINominalStateSelection, ComputeBestCandidate)\n{\n  float baseline = trajectory_costs(0);\n  for (int i = 0; i < num_samples; i++)\n  {\n    if (trajectory_costs(i) < baseline)\n    {\n      baseline = trajectory_costs(i);\n    }\n  }\n  float value_func_threshold_ = 1000.0;\n\n  Eigen::MatrixXf candidate_free_energy;\n  candidate_free_energy.resize(num_candidates, 1);\n  
candidate_free_energy.setZero();\n  // Should probably be in a cuda kernel? Will have to profile.\n  for (int i = 0; i < num_candidates; i++)\n  {\n    for (int j = 0; j < num_samples; j++)\n    {\n      candidate_free_energy(i) += expf(-1.0 / lambda * (trajectory_costs(i * num_samples + j) - baseline));\n    }\n  }\n  for (int i = 0; i < num_candidates; i++)\n  {\n    candidate_free_energy(i) /= (1.0 * num_samples);\n  }\n\n  for (int i = 0; i < num_candidates; i++)\n  {\n    candidate_free_energy(i) = -lambda * logf(candidate_free_energy(i)) + baseline;\n  }\n\n  // Now get the closest initial condition that is above the threshold.\n  int bestIdx = 0;\n  for (int i = 1; i < num_candidates; i++)\n  {\n    if (candidate_free_energy(i) < value_func_threshold_)\n    {\n      bestIdx = i;\n    }\n  }\n\n  int best_index_compute = test_controller->getComputeBestIndex(trajectory_costs);\n\n  ASSERT_EQ(bestIdx, best_index_compute);\n}\n\nTEST_F(RMPPINominalStateSelection, ComputeNominalStateAndStride_InitFalse)\n{\n  // Set the number of candidates to 3\n  const int num_candidates = 3;\n  test_controller->updateCandidates(num_candidates);\n\n  // We know that the cost penalizes any trajectory that exists our donut which\n  // is centered around the origin with radius 2. 
Set the 3 relevant points of\n  // the system such that x_k_star (the previous nominal state) is the best\n  // free energy candidate.\n  DYN::state_array nominal_x_k, nominal_x_kp1, real_x_kp1;\n  nominal_x_k << 2, 0, 0, 0;\n  nominal_x_kp1 << 5, 0, 0, 0;\n  real_x_kp1 << 5, 0, 0, 0;\n\n  bool nominal_state_init = false;\n\n  auto nominal_state =\n      test_controller->getNominalStateFromOptimization(nominal_x_k, nominal_x_kp1, real_x_kp1, nominal_state_init);\n\n  // Since nominal state init is false, this should be the real state\n  ASSERT_EQ(real_x_kp1, nominal_state) << \"\\nExpected state << \\n\"\n                                       << real_x_kp1 << \"\\nComputed state: \\n\"\n                                       << nominal_state;\n}\n\nTEST_F(RMPPINominalStateSelection, ComputeNominalStateAndStride_PreviousNominal)\n{\n  // Set the number of candidates to 3\n  const int num_candidates = 3;\n  test_controller->updateCandidates(num_candidates);\n\n  // We know that the cost penalizes any trajectory that exists our donut which\n  // is centered around the origin with radius 2. 
Set the 3 relevant points of\n  // the system such that x_k_star (the previous nominal state) is the best\n  // free energy candidate.\n  DYN::state_array nominal_x_k, nominal_x_kp1, real_x_kp1;\n  nominal_x_k << 2, 0, 0, 0;\n  nominal_x_kp1 << 15, 0, 0, 0;\n  real_x_kp1 << 15, 0, 0, 0;\n\n  bool nominal_state_init = true;\n\n  auto nominal_state =\n      test_controller->getNominalStateFromOptimization(nominal_x_k, nominal_x_kp1, real_x_kp1, nominal_state_init);\n\n  // Since nominal state init is false, this should be the real state\n  ASSERT_EQ(nominal_x_k, nominal_state) << \"\\nExpected state << \\n\"\n                                        << nominal_x_k << \"\\nComputed state: \\n\"\n                                        << nominal_state;\n}\n\nTEST_F(RMPPINominalStateSelection, ComputeNominalStateAndStride_CurrentNominal)\n{\n  // Set the number of candidates to 3\n  const int num_candidates = 3;\n  test_controller->updateCandidates(num_candidates);\n\n  // We know that the cost penalizes any trajectory that exists our donut which\n  // is centered around the origin with radius 2. 
Set the 3 relevant points of\n  // the system such that x_k_star (the nominal state) is the best\n  // free energy candidate.\n  DYN::state_array nominal_x_k, nominal_x_kp1, real_x_kp1;\n  nominal_x_k << -100, 0, 0, 0;\n  nominal_x_kp1 << 2, 0, 0, 0;\n  real_x_kp1 << 100, 0, 0, 0;\n\n  bool nominal_state_init = true;\n\n  auto nominal_state =\n      test_controller->getNominalStateFromOptimization(nominal_x_k, nominal_x_kp1, real_x_kp1, nominal_state_init);\n\n  // Since nominal state init is false, this should be the real state\n  ASSERT_EQ(nominal_x_kp1, nominal_state) << \"\\nExpected state << \\n\"\n                                          << nominal_x_kp1 << \"\\nComputed state: \\n\"\n                                          << nominal_state;\n}\n\nTEST_F(RMPPINominalStateSelection, ComputeNominalStateAndStride_CurrentReal)\n{\n  // Set the number of candidates to 3\n  const int num_candidates = 3;\n  test_controller->updateCandidates(num_candidates);\n\n  // We know that the cost penalizes any trajectory that exists our donut which\n  // is centered around the origin with radius 2. 
Set the 3 relevant points of\n  // the system such that x_k_star (the previous nominal state) is the best\n  // free energy candidate.\n  DYN::state_array nominal_x_k, nominal_x_kp1, real_x_kp1;\n  nominal_x_k << -100, 0, 0, 0;\n  nominal_x_kp1 << -100, 0, 0, 0;\n  real_x_kp1 << 2, 0, 0, 0;\n\n  bool nominal_state_init = true;\n\n  auto nominal_state =\n      test_controller->getNominalStateFromOptimization(nominal_x_k, nominal_x_kp1, real_x_kp1, nominal_state_init);\n\n  // Since nominal state init is false, this should be the real state\n  ASSERT_EQ(real_x_kp1, nominal_state) << \"\\nExpected state << \\n\"\n                                       << real_x_kp1 << \"\\nComputed state: \\n\"\n                                       << nominal_state;\n}\n\nTEST_F(RMPPINominalStateSelection, DDPFeedbackGainInternalStorage)\n{\n  DYN::state_array x;\n  x << 2, 0, 0, 0;\n  int stride = 1;\n  test_controller->updateImportanceSamplingControl(x, stride);\n  test_controller->updateImportanceSamplingControl(x, stride);\n  auto fb_parm = test_controller->getFeedbackState();\n  auto feedback_gain_eigen_aligned = test_controller->getFeedbackGainsEigen();\n  for (size_t i = 0; i < feedback_gain_eigen_aligned.size(); i++)\n  {\n    int i_index = i * DYN::STATE_DIM * DYN::CONTROL_DIM;\n    for (size_t j = 0; j < DYN::CONTROL_DIM * DYN::STATE_DIM; j++)\n    {\n      ASSERT_FLOAT_EQ(fb_parm.fb_gain_traj_[i_index + j], feedback_gain_eigen_aligned[i].data()[j]) << \" at i = \" << i;\n    }\n  }\n}\n\nbool tubeFailure(float* s)\n{\n  float inner_path_radius2 = 1.675 * 1.675;\n  float outer_path_radius2 = 2.325 * 2.325;\n  float radial_position = s[0] * s[0] + s[1] * s[1];\n  if ((radial_position < inner_path_radius2) || (radial_position > outer_path_radius2))\n  {\n    return true;\n  }\n  else\n  {\n    return false;\n  }\n}\n\nTEST(RMPPITest, RobustMPPILargeVariance)\n{\n  using DYNAMICS = DoubleIntegratorDynamics;\n  using COST_T = DoubleIntegratorCircleCost;\n  const int 
num_timesteps = 50;  // Optimization time horizon\n  using FEEDBACK_T = DDPFeedback<DYNAMICS, num_timesteps>;\n  // Noise enters the system during the \"true\" state propagation. In this case the noise is nominal\n  DYNAMICS model(100);  // Initialize the double integrator dynamics\n  COST_T cost;          // Initialize the cost function\n  float dt = 0.02;      // Timestep of dynamics propagation\n  int max_iter = 1;     // Maximum running iterations of optimization\n  float lambda = 4;     // Learning rate parameter\n  float alpha = 0.00;\n  const int total_time_horizon = 5000;\n  FEEDBACK_T fb_controller(&model, dt);\n  SAMPLING::SAMPLING_PARAMS_T sampler_params;\n  // control variance\n  sampler_params.std_dev[0] = 1;\n  sampler_params.std_dev[1] = 1;\n  SAMPLING sampler = SAMPLING(sampler_params);\n\n  std::vector<float> actual_trajectory_save(num_timesteps * total_time_horizon * DYNAMICS::STATE_DIM);\n  std::vector<float> nominal_trajectory_save(num_timesteps * total_time_horizon * DYNAMICS::STATE_DIM);\n  // std::vector<float> ancillary_trajectory_save(num_timesteps*total_time_horizon*DYNAMICS::STATE_DIM);\n\n  // Set the initial state\n  DYNAMICS::state_array x;\n  x << 2, 0, 0, 1;\n\n  DYNAMICS::state_array xdot;\n\n  // DDP cost parameters\n  auto fb_params = fb_controller.getParams();\n  /**\n   * Q =\n   * [500, 0, 0, 0\n   *  0, 500, 0, 0\n   *  0, 0, 100, 0\n   *  0, 0, 0, 100]\n   */\n  fb_params.Q.diagonal() << 500, 500, 100, 100;\n  /**\n   * Qf = I\n   */\n  fb_params.Q_f = DYNAMICS::dfdx::Identity();\n  /**\n   * R = I\n   */\n  fb_params.R = FEEDBACK_T::square_control_matrix::Identity();\n  fb_controller.setParams(fb_params);\n\n  // Value function threshold\n  float value_function_threshold = 10.0;\n\n  // DoubleIntegratorRobustCost cost2;\n  // auto controller2 = RobustMPPIController<DYNAMICS, DoubleIntegratorRobustCost, num_timesteps,\n  //         1024, 64, 8, 1>(&model, &cost2, dt, max_iter, gamma, value_function_threshold, Q, Qf, R, 
control_var);\n\n  // Initialize the R MPPI controller\n  auto controller = RobustMPPIController<DYNAMICS, COST_T, FEEDBACK_T, num_timesteps, 1024, SAMPLING>(\n      &model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha, value_function_threshold);\n\n  auto controller_params = controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 4, 2);\n  controller_params.cost_rollout_dim_ = dim3(num_timesteps, 1, 2);\n  controller_params.eval_dyn_kernel_dim_ = dim3(64, 4, 1);\n  controller_params.eval_cost_kernel_dim_ = dim3(num_timesteps, 1, 1);\n  controller.setParams(controller_params);\n\n  int fail_count = 0;\n\n  // Start the while loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    // Print the system state\n    if (t % 100 == 0)\n    {\n      printf(\"Current Time: %f  \", t * dt);\n      model.printState(x.data());\n      std::cout << \"                    Candidate Free Energies: \" << controller.getCandidateFreeEnergy().transpose()\n                << std::endl;\n    }\n\n    if (cost.computeStateCost(x) > 1000)\n    {\n      fail_count++;\n    }\n\n    if (tubeFailure(x.data()))\n    {\n      cnpy::npy_save(\"robust_sc_large_actual.npy\", actual_trajectory_save.data(),\n                     { total_time_horizon, num_timesteps, DYNAMICS::STATE_DIM }, \"w\");\n      // cnpy::npy_save(\"robust_sc_ancillary.npy\", ancillary_trajectory_save.data(),\n      //                {total_time_horizon, num_timesteps, DYNAMICS::STATE_DIM},\"w\");\n      cnpy::npy_save(\"robust_sc_large_nominal.npy\", nominal_trajectory_save.data(),\n                     { total_time_horizon, num_timesteps, DYNAMICS::STATE_DIM }, \"w\");\n      printf(\"Current Time: %f  \", t * dt);\n      model.printState(x.data());\n      std::cout << \"                    Candidate Free Energies: \" << controller.getCandidateFreeEnergy().transpose()\n                << std::endl;\n      std::cout << \"Tube failure!!\" << std::endl;\n      FAIL() << \"Visualize 
the trajectories by running scripts/double_integrator/plot_DI_test_trajectories; \"\n                \"the argument to this python file is the build directory of MPPI-Generic\";\n    }\n    // Update the importance sampler\n    controller.updateImportanceSamplingControl(x, 1);\n\n    // Compute the control\n    controller.computeControl(x);\n\n    // Save the trajectory from the nominal state\n    auto nominal_trajectory = controller.getTargetStateSeq();\n\n    // Save the ancillary trajectory\n    // auto ancillary_trajectory = controller.getAncillaryStateSeq();\n\n    for (int i = 0; i < num_timesteps; i++)\n    {\n      for (int j = 0; j < DYNAMICS::STATE_DIM; j++)\n      {\n        actual_trajectory_save[t * num_timesteps * DYNAMICS::STATE_DIM + i * DYNAMICS::STATE_DIM + j] = x(j);\n        // ancillary_trajectory_save[t * num_timesteps * DYNAMICS::STATE_DIM +\n        //                           i*DYNAMICS::STATE_DIM + j] = ancillary_trajectory(j, i);\n        nominal_trajectory_save[t * num_timesteps * DYNAMICS::STATE_DIM + i * DYNAMICS::STATE_DIM + j] =\n            nominal_trajectory(j, i);\n      }\n    }\n    // Get the open loop control\n    DYNAMICS::control_array current_control = controller.getControlSeq().col(0);\n    //    std::cout << \"Current OL control: \" << current_control.transpose() << std::endl;\n\n    // Apply the feedback given the current state\n    current_control += controller.getFeedbackControl(x, controller.getTargetStateSeq().col(0), 0);\n\n    // Propagate the state forward\n    model.computeDynamics(x, current_control, xdot);\n    model.updateState(x, xdot, dt);\n\n    // Add the \"true\" noise of the system\n    model.computeStateDisturbance(dt, x);\n\n    // Slide the control sequence\n    controller.slideControlSequence(1);\n  }\n\n  cnpy::npy_save(\"robust_sc_large_actual.npy\", actual_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DYNAMICS::STATE_DIM }, \"w\");\n  // 
cnpy::npy_save(\"robust_sc_ancillary.npy\",ancillary_trajectory_save.data(),\n  //                {total_time_horizon, num_timesteps, DYNAMICS::STATE_DIM},\"w\");\n  cnpy::npy_save(\"robust_sc_large_nominal.npy\", nominal_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DYNAMICS::STATE_DIM }, \"w\");\n}\n\nTEST(RMPPITest, RobustMPPILargeVarianceRobustCost)\n{\n  using DYNAMICS = DoubleIntegratorDynamics;\n  using COST_T = DoubleIntegratorRobustCost;\n  const int num_timesteps = 50;  // Optimization time horizon\n  using FEEDBACK_T = DDPFeedback<DYNAMICS, num_timesteps>;\n\n  float dt = 0.02;  // Timestep of dynamics propagation\n  // Noise enters the system during the \"true\" state propagation. In this case the noise is nominal\n  DYNAMICS model(100);  // Initialize the double integrator dynamics\n  COST_T cost;          // Initialize the cost function\n  FEEDBACK_T fb_controller(&model, dt);\n  auto params = cost.getParams();\n  params.velocity_desired = 2;\n  params.crash_cost = 100;\n  cost.setParams(params);\n  SAMPLING::SAMPLING_PARAMS_T sampler_params;\n  // control variance\n  sampler_params.std_dev[0] = 1;\n  sampler_params.std_dev[1] = 1;\n  SAMPLING sampler = SAMPLING(sampler_params);\n  int max_iter = 3;    // Maximum running iterations of optimization\n  float lambda = 2.0;  // Learning rate parameter\n  float alpha = 0.0;\n  int crash_status[1] = { 0 };\n  const int total_time_horizon = 5000;\n\n  std::vector<float> actual_trajectory_save(num_timesteps * total_time_horizon * DoubleIntegratorDynamics::STATE_DIM);\n  std::vector<float> nominal_trajectory_save(num_timesteps * total_time_horizon * DoubleIntegratorDynamics::STATE_DIM);\n  // std::vector<float> ancillary_trajectory_save(num_timesteps*total_time_horizon*DoubleIntegratorDynamics::STATE_DIM);\n  std::vector<float> feedback_trajectory_save(num_timesteps * total_time_horizon * DoubleIntegratorDynamics::STATE_DIM);\n\n  // Set the initial state\n  
DYNAMICS::state_array x;\n  x << 2, 0, 0, 1;\n\n  DYNAMICS::state_array xdot;\n\n  // DDP cost parameters\n  auto fb_params = fb_controller.getParams();\n  /**\n   * Q =\n   * [500, 0, 0, 0\n   *  0, 500, 0, 0\n   *  0, 0, 100, 0\n   *  0, 0, 0, 100]\n   */\n  fb_params.Q.diagonal() << 500, 500, 100, 100;\n  /**\n   * Qf = I\n   */\n  fb_params.Q_f = DYNAMICS::dfdx::Identity();\n  /**\n   * R = I\n   */\n  fb_params.R = FEEDBACK_T::square_control_matrix::Identity();\n  // fb_params.num_iterations = 4;\n  fb_controller.setParams(fb_params);\n\n  // Value function threshold\n  float value_function_threshold = 10.0;\n\n  // DoubleIntegratorRobustCost cost2;\n  // auto controller2 = RobustMPPIController<DYNAMICS, DoubleIntegratorRobustCost, num_timesteps,\n  //         1024, 64, 8, 1>(&model, &cost2, dt, max_iter, gamma, value_function_threshold, Q, Qf, R, control_var);\n\n  using CONTROLLER_PARAMS =\n      typename RobustMPPIController<DYNAMICS, COST_T, FEEDBACK_T, num_timesteps, 1024, SAMPLING>::TEMPLATED_PARAMS;\n  CONTROLLER_PARAMS controller_params;\n  controller_params.dt_ = dt;\n  controller_params.num_iters_ = max_iter;\n  controller_params.lambda_ = lambda;\n  controller_params.alpha_ = alpha;\n  controller_params.value_function_threshold_ = value_function_threshold;\n  controller_params.dynamics_rollout_dim_ = dim3(64, 4, 2);\n  controller_params.cost_rollout_dim_ = dim3(num_timesteps, 1, 2);\n  controller_params.eval_dyn_kernel_dim_ = dim3(64, 4, 1);\n  controller_params.eval_cost_kernel_dim_ = dim3(num_timesteps, 1, 1);\n  // Initialize the R MPPI controller\n  auto controller = RobustMPPIController<DYNAMICS, COST_T, FEEDBACK_T, num_timesteps, 1024, SAMPLING>(\n      &model, &cost, &fb_controller, &sampler, controller_params);\n  controller.setKernelChoice(kernelType::USE_SPLIT_KERNELS);\n  int fail_count = 0;\n\n  // Start the while loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    if (cost.computeStateCost(x, t, crash_status) > 1000)\n    {\n 
     fail_count++;\n      crash_status[0] = 0;\n    }\n\n    if (tubeFailure(x.data()))\n    {\n      cnpy::npy_save(\"robust_rc_large_actual.npy\", actual_trajectory_save.data(),\n                     { total_time_horizon, num_timesteps, DYNAMICS::STATE_DIM }, \"w\");\n      // cnpy::npy_save(\"robust_rc_ancillary.npy\", ancillary_trajectory_save.data(),\n      //                {total_time_horizon, num_timesteps, DYNAMICS::STATE_DIM},\"w\");\n      cnpy::npy_save(\"robust_rc_large_nominal.npy\", nominal_trajectory_save.data(),\n                     { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n      cnpy::npy_save(\"robust_rc_large_feedback.npy\", feedback_trajectory_save.data(),\n                     { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n      printf(\"Current Time: %f  \", t * dt);\n      model.printState(x.data());\n      std::cout << \"                    Candidate Free Energies: \" << controller.getCandidateFreeEnergy().transpose()\n                << std::endl;\n      std::cout << \"Tube failure!!\" << std::endl;\n      FAIL() << \"Visualize the trajectories by running scripts/double_integrator/plot_DI_test_trajectories; \"\n                \"the argument to this python file is the build directory of MPPI-Generic\";\n    }\n    // Update the importance sampler\n    controller.updateImportanceSamplingControl(x, 1);\n\n    // Compute the control\n    controller.computeControl(x, 1);\n    controller.computeFeedbackPropagatedStateSeq();\n\n    // Print the system state\n    if (t % 100 == 0)\n    {\n      printf(\"Current Time: %f  \", t * dt);\n      model.printState(x.data());\n      auto free_energy_stats = controller.getFreeEnergyStatistics();\n      std::cout << \"                    Candidate Free Energies: \" << controller.getCandidateFreeEnergy().transpose()\n                << std::endl;\n      std::cout << \"Real    FE [mean, variance]: [\" << 
free_energy_stats.real_sys.freeEnergyMean << \", \"\n                << free_energy_stats.real_sys.freeEnergyVariance << \"]\" << std::endl;\n      std::cout << \"Nominal FE [mean, variance]: [\" << free_energy_stats.nominal_sys.freeEnergyMean << \", \"\n                << free_energy_stats.nominal_sys.freeEnergyVariance << \"]\" << std::endl;\n      std::cout << \"Algorithm Health Normalizer: [\" << controller.getNormalizerPercent() << \"]\" << std::endl;\n      std::cout << \"DF(x, x0, u): [\" << controller.computeDF() << \"]\\n\" << std::endl;\n    }\n\n    // Save the trajectory from the nominal state\n    auto nominal_trajectory = controller.getTargetStateSeq();\n\n    // Save the ancillary trajectory\n    // auto ancillary_trajectory = controller.getAncillaryStateSeq();\n\n    // Compute the propagated state trajectory\n    auto propagated_trajectory = controller.getFeedbackPropagatedStateSeq();\n\n    // Compute the actual trajectory with no feedback\n    auto actual_trajectory = controller.getTargetStateSeq();\n    controller.computeStateTrajectoryHelper(actual_trajectory, x, controller.getControlSeq());\n\n    for (int i = 0; i < num_timesteps; i++)\n    {\n      for (int j = 0; j < DoubleIntegratorDynamics::STATE_DIM; j++)\n      {\n        actual_trajectory_save[t * num_timesteps * DoubleIntegratorDynamics::STATE_DIM +\n                               i * DoubleIntegratorDynamics::STATE_DIM + j] = actual_trajectory(j, i);\n        // ancillary_trajectory_save[t * num_timesteps * DoubleIntegratorDynamics::STATE_DIM +\n        //                           i*DoubleIntegratorDynamics::STATE_DIM + j] = ancillary_trajectory(j, i);\n        nominal_trajectory_save[t * num_timesteps * DoubleIntegratorDynamics::STATE_DIM +\n                                i * DoubleIntegratorDynamics::STATE_DIM + j] = nominal_trajectory(j, i);\n        feedback_trajectory_save[t * num_timesteps * DoubleIntegratorDynamics::STATE_DIM +\n                                 i * 
DoubleIntegratorDynamics::STATE_DIM + j] = propagated_trajectory(j, i);\n      }\n    }\n    // Get the open loop control\n    DYNAMICS::control_array current_control = controller.getControlSeq().col(0);\n    //    std::cout << \"Current OL control: \" << current_control.transpose() << std::endl;\n\n    // Apply the feedback given the current state\n    DYNAMICS::control_array fb_control = controller.getFeedbackControl(x, controller.getTargetStateSeq().col(0), 0);\n    current_control += fb_control;\n\n    // Compute real free energy bound\n\n    // Nominal free energy bound is always alpha\n\n    // Bound of real free energy growth\n\n    // Propagate the state forward\n    model.computeDynamics(x, current_control, xdot);\n    model.updateState(x, xdot, dt);\n\n    // Add the \"true\" noise of the system\n    model.computeStateDisturbance(dt, x);\n\n    // Slide the control sequence\n    controller.slideControlSequence(1);\n  }\n\n  cnpy::npy_save(\"robust_rc_large_actual.npy\", actual_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DYNAMICS::STATE_DIM }, \"w\");\n  // cnpy::npy_save(\"robust_rc_ancillary.npy\",ancillary_trajectory_save.data(),\n  //                {total_time_horizon, num_timesteps, DYNAMICS::STATE_DIM},\"w\");\n  cnpy::npy_save(\"robust_rc_large_nominal.npy\", nominal_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"robust_rc_large_feedback.npy\", feedback_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n}\n"
  },
  {
    "path": "tests/controllers/tube_mppi_test.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_circle_cost.cuh>\n#include <mppi/controllers/MPPI/mppi_controller.cuh>\n#include <mppi/controllers/Tube-MPPI/tube_mppi_controller.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n#include <mppi/core/mppi_common.cuh>\n#include <cnpy.h>\n\nbool tubeFailure(float* s)\n{\n  float inner_path_radius2 = 1.675 * 1.675;\n  float outer_path_radius2 = 2.325 * 2.325;\n  float radial_position = s[0] * s[0] + s[1] * s[1];\n  if ((radial_position < inner_path_radius2) || (radial_position > outer_path_radius2))\n  {\n    return true;\n  }\n  else\n  {\n    return false;\n  }\n}\n\nclass DoubleIntegratorTubeMPPI : public ::testing::Test\n{\npublic:\n  static const int num_timesteps = 100;\n  static const int num_rollouts = 512;\n  using DYN = DoubleIntegratorDynamics;\n  using COST = DoubleIntegratorCircleCost;\n  using FB_CONTROLLER = DDPFeedback<DYN, num_timesteps>;\n  using SAMPLING = mppi::sampling_distributions::GaussianDistribution<DYN::DYN_PARAMS_T>;\n  using VANILLA_CONTROLLER = VanillaMPPIController<DYN, COST, FB_CONTROLLER, num_timesteps, num_rollouts>;\n  using TUBE_CONTROLLER = TubeMPPIController<DYN, COST, FB_CONTROLLER, num_timesteps, num_rollouts>;\n\n  void SetUp() override\n  {\n  }\n};\n\nTEST_F(DoubleIntegratorTubeMPPI, Construction)\n{\n  // Define the model and cost\n  DYN model;\n  COST cost;\n  float dt = 0.01;\n  auto fb_controller = FB_CONTROLLER(&model, dt);\n  auto fb_params = fb_controller.getParams();\n  int max_iter = 10;\n  float lambda = 0.5;\n  float alpha = 0.0;\n\n  // DDP cost parameters\n  Eigen::MatrixXf Q;\n  Eigen::MatrixXf R;\n  fb_params.Q = 100 * FB_CONTROLLER::square_state_matrix::Identity();\n  fb_params.Q_f = fb_params.Q;\n  fb_params.R = FB_CONTROLLER::square_control_matrix::Identity();\n  fb_controller.setParams(fb_params);\n\n  // Sampling Distribution setup\n  
SAMPLING sampler = SAMPLING();\n\n  auto vanilla_controller = VANILLA_CONTROLLER(&model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n\n  auto controller = TUBE_CONTROLLER(&model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n  // This controller needs the ancillary controller running separately for base plant reasons.\n}\n\nTEST_F(DoubleIntegratorTubeMPPI, ConstructionUsingParams)\n{\n  // Define the model and cost\n  DYN model;\n  COST cost;\n  TUBE_CONTROLLER::TEMPLATED_PARAMS controller_params;\n  controller_params.dt_ = 0.01;\n  controller_params.num_iters_ = 10;\n  controller_params.lambda_ = 0.5;\n  controller_params.alpha_ = 0.0;\n  // control std dev\n  // controller_params.control_std_dev_ << 1, 1;\n  auto fb_controller = FB_CONTROLLER(&model, controller_params.dt_);\n  auto fb_params = fb_controller.getParams();\n\n  // DDP cost parameters\n  Eigen::MatrixXf Q;\n  Eigen::MatrixXf R;\n  fb_params.Q = 100 * FB_CONTROLLER::square_state_matrix::Identity();\n  fb_params.Q_f = fb_params.Q;\n  fb_params.R = FB_CONTROLLER::square_control_matrix::Identity();\n  fb_controller.setParams(fb_params);\n\n  // Sampling Distribution setup\n  SAMPLING::SAMPLING_PARAMS_T sampler_params;\n  for (int i = 0; i < DYN::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = 1;\n  }\n  SAMPLING sampler = SAMPLING(sampler_params);\n\n  auto vanilla_controller = VANILLA_CONTROLLER(&model, &cost, &fb_controller, &sampler, controller_params);\n  auto controller = TUBE_CONTROLLER(&model, &cost, &fb_controller, &sampler, controller_params);\n\n  // This controller needs the ancillary controller running separately for base plant reasons.\n}\n\nclass DoubleIntegratorTracking : public ::testing::Test\n{\npublic:\n  static const int num_timesteps = 50;\n  static const int num_rollouts = 1024;\n  const unsigned int total_time_horizon = 500;\n  using DYN = DoubleIntegratorDynamics;\n  using COST = DoubleIntegratorCircleCost;\n  using FB_CONTROLLER = 
DDPFeedback<DYN, num_timesteps>;\n  using SAMPLING = mppi::sampling_distributions::GaussianDistribution<DYN::DYN_PARAMS_T>;\n  using VANILLA_CONTROLLER = VanillaMPPIController<DYN, COST, FB_CONTROLLER, num_timesteps, num_rollouts>;\n  using TUBE_CONTROLLER = TubeMPPIController<DYN, COST, FB_CONTROLLER, num_timesteps, num_rollouts>;\n\n  DYN model;\n  COST cost;\n  float dt = 0.02;   // Timestep of dynamics propagation\n  int max_iter = 3;  // Maximum running iterations of optimization\n  float lambda = 4;  // Learning rate parameter\n  float alpha = 0.0;\n  FB_CONTROLLER fb_controller = FB_CONTROLLER(&model, dt);\n  SAMPLING sampler;\n\n  DYN::state_array x, xdot;\n\n  void SetUp() override\n  {\n    auto params = cost.getParams();\n    params.velocity_desired = 2;\n    cost.setParams(params);\n\n    x << 2, 0, 0, 1;\n    // control std dev\n    SAMPLING::SAMPLING_PARAMS_T sampler_params;\n    for (int i = 0; i < DYN::CONTROL_DIM; i++)\n    {\n      sampler_params.std_dev[i] = 1;\n      sampler_params.control_cost_coeff[i] = 1.0;\n    }\n    sampler = SAMPLING(sampler_params);\n  }\n};\n\nTEST_F(DoubleIntegratorTracking, VanillaMPPINominalVariance)\n{\n  std::vector<float> nominal_trajectory_save(num_timesteps * total_time_horizon * DYN::STATE_DIM);\n\n  // Initialize the vanilla MPPI controller\n  auto vanilla_controller = VANILLA_CONTROLLER(&model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n  auto controller_params = vanilla_controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 4, 1);\n  controller_params.cost_rollout_dim_ = dim3(50, 1, 1);\n  // controller_params.seed_ = 42;\n  vanilla_controller.setParams(controller_params);\n\n  int fail_count = 0;\n  int crash_status[1] = { 0 };\n  // Start the while loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    if (cost.computeStateCost(x, t, crash_status) > 1000)\n    {\n      fail_count++;\n      crash_status[0] = 0;\n    }\n\n    if (tubeFailure(x.data()))\n    
{\n      FAIL();\n    }\n\n    // Compute the control\n    vanilla_controller.computeControl(x, 1);\n\n    // Save the nominal trajectory\n    auto nominal_trajectory = vanilla_controller.getTargetStateSeq();\n\n    for (int i = 0; i < num_timesteps; i++)\n    {\n      for (int j = 0; j < DYN::STATE_DIM; j++)\n      {\n        nominal_trajectory_save[t * num_timesteps * DYN::STATE_DIM + i * DYN::STATE_DIM + j] = nominal_trajectory(j, i);\n      }\n    }\n\n    // Propagate the state forward\n    model.computeDynamics(x, vanilla_controller.getControlSeq().col(0), xdot);\n    model.updateState(x, xdot, dt);\n\n    // Add the \"true\" noise of the system\n    model.computeStateDisturbance(dt, x);\n\n    // Slide the control sequence\n    vanilla_controller.slideControlSequence(1);\n  }\n\n  // save it to file\n  cnpy::npy_save(\"vanilla_nominal.npy\", nominal_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  // std::cout << \"Number of times constraints were violated: \" << fail_count << std::endl;\n}\n\nTEST_F(DoubleIntegratorTracking, VanillaMPPILargeVariance)\n{\n  std::vector<float> nominal_trajectory_save(num_timesteps * total_time_horizon * DYN::STATE_DIM);\n  model.setStateVariance(100);\n  // Initialize the vanilla MPPI controller\n  auto vanilla_controller = VANILLA_CONTROLLER(&model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n  auto controller_params = vanilla_controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 4, 1);\n  controller_params.cost_rollout_dim_ = dim3(50, 1, 1);\n  // controller_params.seed_ = 42;\n  vanilla_controller.setParams(controller_params);\n\n  // bool success = false;\n  int fail_count = 0;\n\n  // Start the while loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    if (tubeFailure(x.data()))\n    {\n      // success = true;\n      fail_count++;\n    }\n\n    if (fail_count > 50)\n    {\n      break;\n  
  }\n    // Compute the control\n    vanilla_controller.computeControl(x, 1);\n\n    // Save the nominal trajectory\n    auto nominal_trajectory = vanilla_controller.getTargetStateSeq();\n\n    for (int i = 0; i < num_timesteps; i++)\n    {\n      for (int j = 0; j < DYN::STATE_DIM; j++)\n      {\n        nominal_trajectory_save[t * num_timesteps * DYN::STATE_DIM + i * DYN::STATE_DIM + j] = nominal_trajectory(j, i);\n      }\n    }\n\n    // Propagate the state forward\n    model.computeDynamics(x, vanilla_controller.getControlSeq().col(0), xdot);\n    model.updateState(x, xdot, dt);\n\n    // Add the \"true\" noise of the system\n    model.computeStateDisturbance(dt, x);\n\n    // Slide the control sequence\n    vanilla_controller.slideControlSequence(1);\n    //    if (success) {\n    //      break;\n    //    }\n  }\n\n  cnpy::npy_save(\"vanilla_large.npy\", nominal_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DYN::STATE_DIM }, \"w\");\n  // std::cout << \"Number of times constraints were violated: \" << fail_count << std::endl;\n}\n\nTEST_F(DoubleIntegratorTracking, VanillaMPPILargeVarianceTracking)\n{\n  std::vector<float> nominal_trajectory_save(num_timesteps * total_time_horizon * DYN::STATE_DIM);\n  std::vector<float> actual_feedback_trajectory_save(num_timesteps * total_time_horizon * DYN::STATE_DIM);\n  model.setStateVariance(100);\n\n  // Initialize the vanilla MPPI controller\n  auto vanilla_controller = VANILLA_CONTROLLER(&model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n  auto controller_params = vanilla_controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 4, 1);\n  controller_params.cost_rollout_dim_ = dim3(50, 1, 1);\n  // controller_params.seed_ = 42;\n  vanilla_controller.setParams(controller_params);\n\n  // DDP cost parameters\n  auto fb_params = vanilla_controller.getFeedbackParams();\n  fb_params.Q.diagonal() << 500, 500, 100, 100;\n  
vanilla_controller.setFeedbackParams(fb_params);\n\n  // bool success = false;\n  int fail_count = 0;\n\n  // Start the while loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    if (tubeFailure(x.data()))\n    {\n      // success = true;\n      fail_count++;\n    }\n\n    if (fail_count > 50)\n    {\n      break;\n    }\n    // Compute the control\n    vanilla_controller.computeControl(x, 1);\n\n    // Compute the feedback gains\n    vanilla_controller.computeFeedback(x);\n\n    // Save the nominal trajectory\n    auto nominal_trajectory = vanilla_controller.getTargetStateSeq();\n    auto nominal_control = vanilla_controller.getControlSeq();\n    vanilla_controller.computeFeedbackPropagatedStateSeq();\n    auto feedback_state_trajectory = vanilla_controller.getFeedbackPropagatedStateSeq();\n\n    for (int i = 0; i < num_timesteps; i++)\n    {\n      for (int j = 0; j < DYN::STATE_DIM; j++)\n      {\n        nominal_trajectory_save[t * num_timesteps * DYN::STATE_DIM + i * DYN::STATE_DIM + j] = nominal_trajectory(j, i);\n        actual_feedback_trajectory_save[t * num_timesteps * DYN::STATE_DIM + i * DYN::STATE_DIM + j] =\n            feedback_state_trajectory(j, i);\n      }\n    }\n\n    // Get the open loop control\n    DYN::control_array current_control = nominal_control.col(0);\n\n    // Apply the feedback given the current state\n    current_control += vanilla_controller.getFeedbackControl(x, nominal_trajectory.col(0), 0);\n\n    // Propagate the state forward\n    model.computeDynamics(x, current_control, xdot);\n    model.updateState(x, xdot, dt);\n\n    // Add the \"true\" noise of the system\n    model.computeStateDisturbance(dt, x);\n\n    // Slide the control sequence\n    vanilla_controller.slideControlSequence(1);\n  }\n\n  cnpy::npy_save(\"vanilla_large_track_actual.npy\", nominal_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DYN::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"vanilla_large_track_feedback.npy\", 
actual_feedback_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DYN::STATE_DIM }, \"w\");\n  // std::cout << \"Number of times constraints were violated: \" << fail_count << std::endl;\n}\n\nTEST_F(DoubleIntegratorTracking, TubeMPPILargeVariance)\n{\n  // Noise enters the system during the \"true\" state propagation. In this case the noise is nominal\n  model.setStateVariance(100);\n  auto fb_params = fb_controller.getParams();\n  /**\n   * Q =\n   * [500, 0, 0, 0\n   *  0, 500, 0, 0\n   *  0, 0, 100, 0\n   *  0, 0, 0, 100]\n   */\n  fb_params.Q.diagonal() << 500, 500, 100, 100;\n  /**\n   * Qf = I\n   */\n  fb_params.Q_f = FB_CONTROLLER::square_state_matrix::Identity();\n  /**\n   * R = I\n   */\n  fb_params.R = FB_CONTROLLER::square_control_matrix::Identity();\n  fb_controller.setParams(fb_params);\n\n  // To pass it should be lambda = 4, vel desired = 2, vel cost = 1 crash cost 1000, nom threshold 20\n\n  std::vector<float> actual_trajectory_save(num_timesteps * total_time_horizon * DYN::STATE_DIM);\n  std::vector<float> nominal_trajectory_save(num_timesteps * total_time_horizon * DYN::STATE_DIM);\n  // std::vector<float> ancillary_trajectory_save(num_timesteps*total_time_horizon*DYN::STATE_DIM);\n  std::vector<float> feedback_trajectory_save(num_timesteps * total_time_horizon * DYN::STATE_DIM);\n\n  // Initialize the tube MPPI controller\n  auto controller = TUBE_CONTROLLER(&model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n  auto controller_params = controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 1, 1);\n  controller_params.cost_rollout_dim_ = dim3(50, 1, 1);\n  // controller_params.seed_ = 42;\n  controller.setParams(controller_params);\n\n  controller.setNominalThreshold(100);\n\n  int fail_count = 0;\n  int crash_status[1] = { 0 };\n\n  // Start the while loop\n  for (int t = 0; t < total_time_horizon; ++t)\n  {\n    //     Print the system state\n    if (t % 100 == 0)\n    {\n      
float current_cost = cost.computeStateCost(x, 1, crash_status);\n      printf(\"Current Time: %f    \", t * dt);\n      printf(\"Current State Cost: %f    \", current_cost);\n      model.printState(x.data());\n      auto free_energy_stats = controller.getFreeEnergyStatistics();\n      std::cout << \"Real    FE [mean, variance]: [\" << free_energy_stats.real_sys.freeEnergyMean << \", \"\n                << free_energy_stats.real_sys.freeEnergyVariance << \"]\" << std::endl;\n      std::cout << \"Nominal FE [mean, variance]: [\" << free_energy_stats.nominal_sys.freeEnergyMean << \", \"\n                << free_energy_stats.nominal_sys.freeEnergyVariance << \"]\" << std::endl;\n      std::cout << \"Algorithm Health Normalizer: [\" << controller.getNormalizerPercent() << \"]\\n\" << std::endl;\n    }\n\n    if (cost.computeStateCost(x, t, crash_status) > 1000)\n    {\n      fail_count++;\n      crash_status[0] = 0;\n    }\n\n    if (tubeFailure(x.data()))\n    {\n      float current_cost = cost.computeStateCost(x, 1, crash_status);\n      printf(\"Current Time: %f    \", t * dt);\n      printf(\"Current State Cost: %f    \", current_cost);\n      model.printState(x.data());\n      auto free_energy_stats = controller.getFreeEnergyStatistics();\n      std::cout << \"Real    FE [mean, variance]: [\" << free_energy_stats.real_sys.freeEnergyMean << \", \"\n                << free_energy_stats.real_sys.freeEnergyVariance << \"]\" << std::endl;\n      std::cout << \"Nominal FE [mean, variance]: [\" << free_energy_stats.nominal_sys.freeEnergyMean << \", \"\n                << free_energy_stats.nominal_sys.freeEnergyVariance << \"]\" << std::endl;\n      std::cout << \"Algorithm Health Normalizer: [\" << controller.getNormalizerPercent() << \"]\\n\" << std::endl;\n      cnpy::npy_save(\"tube_large_actual.npy\", actual_trajectory_save.data(),\n                     { total_time_horizon, num_timesteps, DYN::STATE_DIM }, \"w\");\n      // cnpy::npy_save(\"tube_ancillary.npy\", 
ancillary_trajectory_save.data(),\n      //                {total_time_horizon, num_timesteps, DYN::STATE_DIM},\"w\");\n      cnpy::npy_save(\"tube_large_nominal.npy\", nominal_trajectory_save.data(),\n                     { total_time_horizon, num_timesteps, DYN::STATE_DIM }, \"w\");\n      cnpy::npy_save(\"tube_large_feedback.npy\", feedback_trajectory_save.data(),\n                     { total_time_horizon, num_timesteps, DYN::STATE_DIM }, \"w\");\n      FAIL() << \"Visualize the trajectories by running scripts/double_integrator/plot_DI_test_trajectories; \"\n                \"the argument to this python file is the build directory of MPPI-Generic\";\n    }\n\n    // Compute the control\n    controller.computeControl(x, 1);\n\n    // Save the trajectory from the nominal state\n    auto nominal_trajectory = controller.getTargetStateSeq();\n    auto actual_trajectory = controller.getActualStateSeq();\n\n    // Get the feedback gains associated with the nominal state and control trajectory\n    controller.computeFeedback(x);\n\n    // Save the ancillary trajectory\n    // auto ancillary_trajectory = controller.getAncillaryStateSeq();\n\n    // Compute the propagated feedback trajectory\n    controller.computeFeedbackPropagatedStateSeq();\n    auto propagated_feedback_trajectory = controller.getFeedbackPropagatedStateSeq();\n\n    for (int i = 0; i < num_timesteps; i++)\n    {\n      for (int j = 0; j < DYN::STATE_DIM; j++)\n      {\n        actual_trajectory_save[t * num_timesteps * DYN::STATE_DIM + i * DYN::STATE_DIM + j] = actual_trajectory(j, i);\n        // ancillary_trajectory_save[t * num_timesteps * DYN::STATE_DIM +\n        //                         i*DYN::STATE_DIM + j] = ancillary_trajectory(j, i);\n        nominal_trajectory_save[t * num_timesteps * DYN::STATE_DIM + i * DYN::STATE_DIM + j] = nominal_trajectory(j, i);\n        feedback_trajectory_save[t * num_timesteps * DYN::STATE_DIM + i * DYN::STATE_DIM + j] =\n            
propagated_feedback_trajectory(j, i);\n      }\n    }\n\n    // Get the open loop control\n    DYN::control_array current_control = controller.getControlSeq().col(0);\n\n    // Apply the feedback given the current state\n    current_control += controller.getFeedbackControl(x, controller.getTargetStateSeq().col(0), 0);\n\n    // Propagate the state forward\n    model.computeDynamics(x, current_control, xdot);\n    model.updateState(x, xdot, dt);\n\n    // Add the \"true\" noise of the system\n    model.computeStateDisturbance(dt, x);\n\n    // Slide the control sequence\n    controller.slideControlSequence(1);\n  }\n\n  cnpy::npy_save(\"tube_large_actual.npy\", actual_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DYN::STATE_DIM }, \"w\");\n  // cnpy::npy_save(\"tube_ancillary.npy\",ancillary_trajectory_save.data(),\n  //                {total_time_horizon, num_timesteps, DYN::STATE_DIM},\"w\");\n  cnpy::npy_save(\"tube_large_nominal.npy\", nominal_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DYN::STATE_DIM }, \"w\");\n  cnpy::npy_save(\"tube_large_feedback.npy\", feedback_trajectory_save.data(),\n                 { total_time_horizon, num_timesteps, DYN::STATE_DIM }, \"w\");\n}\n"
  },
  {
    "path": "tests/controllers/vanilla_mppi_test.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/instantiations/cartpole_mppi/cartpole_mppi.cuh>\n#include <mppi/instantiations/quadrotor_mppi/quadrotor_mppi.cuh>\n\nclass Cartpole_VanillaMPPI : public ::testing::Test\n{\npublic:\n  static const int NUM_TIMESTEPS = 100;\n  static const int NUM_ROLLOUTS = 2048;\n  using DYN_T = CartpoleDynamics;\n  using COST_T = CartpoleQuadraticCost;\n  using FB_T = DDPFeedback<DYN_T, NUM_TIMESTEPS>;\n  using SAMPLING_T = mppi::sampling_distributions::GaussianDistribution<DYN_T::DYN_PARAMS_T>;\n  using CONTROLLER_T = VanillaMPPIController<DYN_T, COST_T, FB_T, NUM_TIMESTEPS, NUM_ROLLOUTS, SAMPLING_T>;\n  using control_trajectory = CONTROLLER_T::control_trajectory;\n  using control_array = CONTROLLER_T::control_array;\n\n  DYN_T model = DYN_T(1.0, 1.0, 1.0);\n  COST_T cost;\n  FB_T* fb_controller;\n  SAMPLING_T* sampler;\n  CONTROLLER_T* controller;\n\n  float dt = 0.01;\n  int max_iter = 10;\n  float gamma = 0.5;\n  float lambda = 0.25;\n  float alpha = 0.01;\n  control_trajectory init_control = control_trajectory::Constant(0);\n  control_array control_std_dev = control_array::Constant(5.0);\n  cudaStream_t stream;\n\n  void SetUp() override\n  {\n    fb_controller = new FB_T(&model, dt);\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n    auto sampler_params = SAMPLING_T::SAMPLING_PARAMS_T();\n    for (int i = 0; i < DYN_T::CONTROL_DIM; i++)\n    {\n      sampler_params.std_dev[i] = control_std_dev[i];\n    }\n    EXPECT_EQ(model.getGrdSharedSizeBytes(), 0);\n    EXPECT_EQ(model.getBlkSharedSizeBytes(), 0);\n    sampler = new SAMPLING_T(sampler_params);\n    controller = new CONTROLLER_T(&model, &cost, fb_controller, sampler, dt, max_iter, lambda, alpha, NUM_TIMESTEPS,\n                                  init_control, stream);\n  }\n\n  void TearDown() override\n  {\n    delete controller;\n    delete fb_controller;\n    delete sampler;\n  }\n};\n\nTEST_F(Cartpole_VanillaMPPI, BindToStream)\n{\n  
EXPECT_EQ(controller->stream_, controller->model_->stream_) << \"Stream bind to dynamics failure\";\n  EXPECT_EQ(controller->stream_, controller->cost_->stream_) << \"Stream bind to cost failure\";\n  EXPECT_EQ(controller->stream_, controller->fb_controller_->getHostPointer()->stream_) << \"Stream  bind to feedback \"\n                                                                                           \"failure\";\n  EXPECT_EQ(controller->stream_, controller->sampler_->stream_) << \"Stream bind to sampling distribution failure\";\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n}\n\nTEST_F(Cartpole_VanillaMPPI, UpdateNoiseStdDev)\n{\n  control_array new_control_std_dev = control_array::Constant(3.5);\n  auto sampler_params = sampler->getParams();\n  for (int i = 0; i < DYN_T::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = new_control_std_dev[i];\n  }\n  sampler->setParams(sampler_params, true);\n  EXPECT_FLOAT_EQ(new_control_std_dev[0], controller->sampler_->getParams().std_dev[0]);\n}\n\nTEST_F(Cartpole_VanillaMPPI, SwingUpTest)\n{\n  CartpoleQuadraticCostParams new_params;\n  new_params.cart_position_coeff = 100;\n  new_params.pole_angle_coeff = 200;\n  new_params.cart_velocity_coeff = 10;\n  new_params.pole_angular_velocity_coeff = 20;\n  new_params.control_cost_coeff[0] = 1;\n  new_params.terminal_cost_coeff = 0;\n  new_params.desired_terminal_state[0] = -20;\n  new_params.desired_terminal_state[1] = 0;\n  new_params.desired_terminal_state[2] = M_PI;\n  new_params.desired_terminal_state[3] = 0;\n\n  cost.setParams(new_params);\n\n  auto sampler_params = sampler->getParams();\n  sampler_params.control_cost_coeff[0] = 1.0;\n  sampler_params.pure_noise_trajectories_percentage = 0.01f;\n  sampler_params.rewrite_controls_block_dim = dim3(32, 16, 1);\n  sampler->setParams(sampler_params);\n\n  auto controller_params = controller->getParams();\n  max_iter = 1;\n  controller_params.dynamics_rollout_dim_ = dim3(64, 4, 1);\n  
controller_params.cost_rollout_dim_ = dim3(64, 1, 1);\n  controller_params.num_iters_ = max_iter;\n  controller_params.slide_control_scale_[0] = 1.0;\n  controller->setParams(controller_params);\n\n  controller->chooseAppropriateKernel();\n\n  DYN_T::state_array current_state = DYN_T::state_array::Zero();\n  int time_horizon = 1000;\n\n  DYN_T::state_array xdot(4, 1);\n\n  for (int i = 0; i < time_horizon; ++i)\n  {\n    if (i % 50 == 0)\n    {\n      printf(\"Current Time: %f    \", i * dt);\n      printf(\"Current Baseline Cost: %f    \", controller->getBaselineCost());\n      model.printState(current_state.data());\n    }\n    // Compute the control\n    controller->computeControl(current_state, 1);\n\n    DYN_T::control_array control;\n    control = controller->getControlSeq().block(0, 0, DYN_T::CONTROL_DIM, 1);\n    // Increment the state\n    model.computeStateDeriv(current_state, control, xdot);\n    model.updateState(current_state, xdot, dt);\n\n    controller->slideControlSequence(1);\n  }\n  EXPECT_LT(controller->getBaselineCost(), 1.0);\n}\n\nTEST_F(Cartpole_VanillaMPPI, getSampledStateTrajectoriesTest)\n{\n  // float dt = 0.01;\n  // float max_iter = 1;\n  // float lambda = 0.25;\n  // float alpha = 0.01;\n\n  // CartpoleDynamics::control_array control_std_dev = CartpoleDynamics::control_array::Constant(5.0);\n\n  // auto fb_controller = DDPFeedback<CartpoleDynamics, 100>(&model, dt);\n  // auto controller =\n  //     VanillaMPPIController<CartpoleDynamics, CartpoleQuadraticCost, DDPFeedback<CartpoleDynamics, 100>, 100, 2048,\n  //     64,\n  //                           8>(&model, &cost, &fb_controller, dt, max_iter, lambda, alpha, control_std_dev);\n  DYN_T::state_array current_state = DYN_T::state_array::Zero();\n  controller->setPercentageSampledControlTrajectories(0.25);\n\n  controller->calculateSampledStateTrajectories();\n  const auto outputs = controller->getSampledOutputTrajectories();\n  EXPECT_EQ(outputs.size(), 0.25 * 
NUM_ROLLOUTS);\n}\n\nclass Quadrotor_VanillaMPPI : public ::testing::Test\n{\npublic:\n  static const int NUM_TIMESTEPS = 150;\n  static const int NUM_ROLLOUTS = 2048;\n  using DYN_T = QuadrotorDynamics;\n  using COST_T = QuadrotorQuadraticCost;\n  using FB_T = DDPFeedback<DYN_T, NUM_TIMESTEPS>;\n  using SAMPLING_T = mppi::sampling_distributions::GaussianDistribution<DYN_T::DYN_PARAMS_T>;\n  using CONTROLLER_T = VanillaMPPIController<DYN_T, COST_T, FB_T, NUM_TIMESTEPS, NUM_ROLLOUTS, SAMPLING_T>;\n  using control_trajectory = CONTROLLER_T::control_trajectory;\n  using control_array = CONTROLLER_T::control_array;\n\n  float dt = 0.01;\n  int max_iter = 1;\n  float lambda = 4.0;\n  float alpha = 0.9;\n  cudaStream_t stream;\n  control_array control_std_dev = control_array::Constant(0.5);\n  control_trajectory init_control = control_trajectory::Constant(0.0);\n\n  DYN_T model;\n  COST_T cost;\n  FB_T fb_controller = FB_T(&model, dt);\n  SAMPLING_T* sampler;\n  CONTROLLER_T* controller;\n  void SetUp() override\n  {\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n    auto sampler_params = SAMPLING_T::SAMPLING_PARAMS_T();\n    for (int i = 0; i < DYN_T::CONTROL_DIM; i++)\n    {\n      sampler_params.std_dev[i] = control_std_dev[i];\n    }\n    for (int i = 0; i < NUM_TIMESTEPS; i++)\n    {\n      init_control(3, i) = mppi::math::GRAVITY;\n    }\n    sampler = new SAMPLING_T(sampler_params);\n    controller = new CONTROLLER_T(&model, &cost, &fb_controller, sampler, dt, max_iter, lambda, alpha, NUM_TIMESTEPS,\n                                  init_control, stream);\n  }\n\n  void TearDown() override\n  {\n    delete controller;\n    delete sampler;\n  }\n};\n\nTEST_F(Quadrotor_VanillaMPPI, HoverTest)\n{\n  QuadrotorQuadraticCostParams new_params;\n  new_params.x_goal()[2] = 1;\n  new_params.x_coeff = 400;\n  new_params.v_coeff = 150;\n  // new_params.q_coeff = 15;\n  new_params.roll_coeff = 15;\n  new_params.pitch_coeff = 15;\n  new_params.yaw_coeff = 15;\n  
new_params.w_coeff = 5;\n  float acceptable_distance = 0.15;  // meters\n\n  std::cout << \"Goal: \" << new_params.getDesiredState().transpose() << std::endl;\n\n  cost.setParams(new_params);\n\n  // int max_iter = 1;\n  // float lambda = 4.0;\n  // float alpha = 0.9;\n\n  // DYN_T::control_array control_std_dev = DYN_T::control_array::Constant(0.5);\n  // control_std_dev[3] = 2;\n  auto sampler_params = sampler->getParams();\n  sampler_params.std_dev[3] = 2.0f;\n  sampler_params.rewrite_controls_block_dim = dim3(32, 16, 1);\n  sampler_params.sum_strides = 32;\n  sampler->setParams(sampler_params);\n\n  // CONTROLLER::control_trajectory init_control = CONTROLLER::control_trajectory::Zero();\n  // for (int i = 0; i < num_timesteps; i++)\n  // {\n  //   init_control(3, i) = mppi::math::GRAVITY;\n  // }\n\n  // auto controller = CONTROLLER(&model, &cost, &fb_controller, dt, max_iter, lambda, alpha, control_std_dev,\n  //                              num_timesteps, init_control);\n  auto controller_params = controller->getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(32, 4, 1);\n  controller_params.cost_rollout_dim_ = dim3(32, 4, 1);\n  controller->setParams(controller_params);\n  controller->setDebug(true);\n  controller->chooseAppropriateKernel();\n  DYN_T::state_array current_state = DYN_T::state_array::Zero();\n  // current_state(6) = 1;  // set q_w to 1\n  current_state(S_IND_CLASS(DYN_T::DYN_PARAMS_T, QUAT_W)) = 1;  // set q_w to 1\n  int time_horizon = 3000;\n\n  // float xdot[DYN_T::STATE_DIM];\n  DYN_T::state_array xdot(4, 1);\n  DYN_T::control_array control;\n  control = controller->getControlSeq().block(0, 0, DYN_T::CONTROL_DIM, 1);\n\n  int far_away_cnt = 0;\n  Eigen::Vector3f goal_pos = new_params.getDesiredState().block<3, 1>(0, 0);\n\n  std::chrono::steady_clock::time_point loop_start = std::chrono::steady_clock::now();\n  std::chrono::steady_clock::time_point loop_end = std::chrono::steady_clock::now();\n  for (int i = 0; i < time_horizon; 
++i)\n  {\n    if (i % 50 == 0)\n    {\n      printf(\"Current Time: %6.2f    \", i * dt);\n      printf(\"Current Baseline Cost: %f \\n\", controller->getBaselineCost());\n      printf(\"State Cost: %f\\n\", controller->cost_->computeStateCost(current_state));\n      model.printState(current_state.data());\n      std::cout << \"Control: \" << control.transpose() << std::endl;\n      std::cout << \"ComputeControl took \" << mppi::math::timeDiffms(loop_end, loop_start) << \" ms\" << std::endl;\n    }\n    if (std::isnan(controller->getBaselineCost()) || control.hasNaN() || current_state.hasNaN())\n    {\n      printf(\"ENCOUNTERED A NAN!!\\n\");\n      printf(\"Current Time: %f    \", i * dt);\n      printf(\"Current Baseline Cost: %f \\n\", controller->getBaselineCost());\n      model.printState(current_state.data());\n      std::cout << \"Control: \" << control.transpose() << std::endl;\n      FAIL();\n    }\n\n    // Compute the control\n    loop_start = std::chrono::steady_clock::now();\n    controller->computeControl(current_state, 1);\n    loop_end = std::chrono::steady_clock::now();\n\n    control = controller->getControlSeq().block(0, 0, DYN_T::CONTROL_DIM, 1);\n    // Increment the state\n    model.computeStateDeriv(current_state, control, xdot);\n    model.updateState(current_state, xdot, dt);\n\n    controller->slideControlSequence(1);\n    Eigen::Vector3f pos = current_state.block<3, 1>(0, 0);\n    float dist = (pos - goal_pos).norm();\n    if (dist > acceptable_distance)\n    {\n      far_away_cnt++;\n    }\n  }\n  float percentage_in_ball = float(far_away_cnt) / time_horizon;\n  std::cout << \"Amount of time outside \" << acceptable_distance << \" m: \" << percentage_in_ball * 100 << \"%\"\n            << std::endl;\n\n  EXPECT_LT(percentage_in_ball, 0.1);\n}\n"
  },
  {
    "path": "tests/cost_functions/CMakeLists.txt",
    "content": "set(GTEST_LIBRARIES gtest gmock gtest_main)\nfile(GLOB TARGET_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cu)\n\nforeach(T_FILE IN LISTS TARGET_SRCS)\n  get_filename_component(T_NAME ${T_FILE} NAME_WE)\n  add_executable(${T_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp ${T_FILE})\n  target_link_libraries(${T_NAME}\n                        ${GTEST_LIBRARIES}\n                        ${MPPI_HEADER_LIBRARY_NAME})\n#  add_test(NAME ${T_NAME} COMMAND ${T_NAME})\n  gtest_add_tests(TARGET ${T_NAME})\n  set_target_properties(${T_NAME} PROPERTIES FOLDER test)\nendforeach()\n"
  },
  {
    "path": "tests/cost_functions/autorally_robust_cost_test.cu",
    "content": "//\n// Created by jgibson37 on 2/7/20.\n//\n\n#include <gtest/gtest.h>\n#include <mppi/cost_functions/autorally/ar_robust_cost.cuh>\n#include <kernel_tests/cost_functions/autorally/ar_robust_cost_kernel_test.cuh>\n#include <kernel_tests/cost_functions/cost_generic_kernel_tests.cuh>\n\n// Auto-generated header file\n#include <autorally_test_map.h>\n\nclass ARRobustCostTest : public ::testing::Test\n{\npublic:\n  ARRobustCost cost;\n\nprotected:\n  void SetUp() override\n  {\n    ARRobustCostParams params;\n    params.boundary_threshold = 0.0;\n    params.crash_coeff = 10000;\n    params.track_slop = 0.0;\n    params.desired_speed = 10;\n    params.speed_coeff = 10;\n    params.heading_coeff = 20;\n    cost.setParams(params);\n  }\n  void TearDown() override\n  {\n  }\n};\n\nTEST_F(ARRobustCostTest, Constructor)\n{\n  // checks for CRTP\n  ARRobustCost* robust = cost.cost_d_;\n  EXPECT_EQ(robust, nullptr);\n}\n\nTEST_F(ARRobustCostTest, GPUSetup)\n{\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  cost.bindToStream(stream);\n\n  EXPECT_EQ(cost.stream_, stream) << \"Stream binding failure.\";\n\n  EXPECT_EQ(cost.GPUMemStatus_, false);\n  EXPECT_EQ(cost.cost_d_, nullptr);\n\n  cost.GPUSetup();\n\n  EXPECT_EQ(cost.GPUMemStatus_, true);\n  EXPECT_NE(cost.cost_d_, nullptr);\n\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n}\n\nvoid checkParameters(ARRobustCostParams& params, ARRobustCostParams& result)\n{\n  EXPECT_FLOAT_EQ(params.speed_coeff, result.speed_coeff);\n  EXPECT_FLOAT_EQ(params.track_coeff, result.track_coeff);\n  EXPECT_FLOAT_EQ(params.heading_coeff, result.heading_coeff);\n  EXPECT_FLOAT_EQ(params.control_cost_coeff[0], result.control_cost_coeff[0]);\n  EXPECT_FLOAT_EQ(params.control_cost_coeff[1], result.control_cost_coeff[1]);\n  EXPECT_FLOAT_EQ(params.slip_coeff, result.slip_coeff);\n  EXPECT_FLOAT_EQ(params.crash_coeff, result.crash_coeff);\n  EXPECT_FLOAT_EQ(params.boundary_threshold, result.boundary_threshold);\n  
EXPECT_FLOAT_EQ(params.max_slip_ang, result.max_slip_ang);\n  EXPECT_FLOAT_EQ(params.track_slop, result.track_slop);\n  EXPECT_FLOAT_EQ(params.r_c1.x, result.r_c1.x);\n  EXPECT_FLOAT_EQ(params.r_c1.y, result.r_c1.y);\n  EXPECT_FLOAT_EQ(params.r_c1.z, result.r_c1.z);\n  EXPECT_FLOAT_EQ(params.r_c2.x, result.r_c2.x);\n  EXPECT_FLOAT_EQ(params.r_c2.y, result.r_c2.y);\n  EXPECT_FLOAT_EQ(params.r_c2.z, result.r_c2.z);\n  EXPECT_FLOAT_EQ(params.trs.x, result.trs.x);\n  EXPECT_FLOAT_EQ(params.trs.y, result.trs.y);\n  EXPECT_FLOAT_EQ(params.trs.z, result.trs.z);\n}\n\nTEST_F(ARRobustCostTest, setParams)\n{\n  ARRobustCostParams params;\n\n  params.speed_coeff = 1.0;\n  params.track_coeff = 2.0;\n  params.heading_coeff = 3.0;\n  params.control_cost_coeff[0] = 4.0;\n  params.control_cost_coeff[1] = 5.0;\n  params.slip_coeff = 6.0;\n  params.crash_coeff = 7.0;\n  params.boundary_threshold = 8.0;\n  params.max_slip_ang = 9.0;\n  params.track_slop = 10.0;\n  params.r_c1.x = 12;\n  params.r_c1.y = 13;\n  params.r_c1.z = 14;\n  params.r_c2.x = 15;\n  params.r_c2.y = 16;\n  params.r_c2.z = 17;\n  params.trs.x = 18;\n  params.trs.y = 19;\n  params.trs.z = 20;\n\n  cost.setParams(params);\n  ARRobustCostParams result = cost.getParams();\n  checkParameters(params, result);\n\n  cost.GPUSetup();\n  int width, height;\n  launchParameterTestKernel<>(cost, params, width, height);\n  checkParameters(params, result);\n}\n\nTEST_F(ARRobustCostTest, getStabilizingCostTest)\n{\n  ARRobustCostParams params;\n  params.max_slip_ang = 1.25;\n  params.crash_coeff = 10000;\n  params.slip_coeff = 10;\n  cost.setParams(params);\n\n  float s[7];\n  for (int i = 0; i < 7; i++)\n  {\n    s[i] = 0;\n  }\n  s[4] = 0.24;\n  s[5] = 0.0;\n  float result = cost.getStabilizingCost(s);\n  EXPECT_FLOAT_EQ(result, 0.0);\n\n  s[4] = 1.0;\n  s[5] = 1.0;\n  result = cost.getStabilizingCost(s);\n  EXPECT_FLOAT_EQ(result, 0.785398 * 10);\n  // EXPECT_FLOAT_EQ(result, fabs(atan(s[5]/s[4])) * params.slip_coeff + 
params.crash_coeff);\n\n  s[4] = 1.0;\n  s[5] = 10.0;\n  result = cost.getStabilizingCost(s);\n  EXPECT_FLOAT_EQ(result, 1.4711 * 10 + params.crash_coeff);\n\n  s[3] = 1.5;\n  s[4] = 1.0;\n  s[5] = 10.0;\n  result = cost.getStabilizingCost(s);\n  EXPECT_FLOAT_EQ(result, 1.4711 * 10 + params.crash_coeff);\n}\n\nfloat calculateRobustCostmapValue(ARRobustCost& cost, float3 state, int width, int height, float x_min, float x_max,\n                                  float y_min, float y_max, int ppm)\n{\n  float x_front = state.x + cost.FRONT_D * cosf(state.z);\n  float y_front = state.y + cost.FRONT_D * sinf(state.z);\n  float x_back = state.x + cost.BACK_D * cosf(state.z);\n  float y_back = state.y + cost.BACK_D * sinf(state.z);\n\n  // check for overflow\n  float new_x = max(min(x_front - x_min, x_max - x_min), 0.0f);\n  float new_y = max(min(y_front - y_min, y_max - y_min), 0.0f);\n\n  // calculate the track value\n  float front_track = fabs(height / 2.0f - (new_y)) + (new_x) / width;\n  std::cout << \"front point = \" << new_x << \", \" << new_y << \" = \" << front_track << std::endl;\n\n  new_x = max(min(x_back - x_min + 1.0 / (width * ppm), x_max - x_min), 0.0f);\n  new_y = max(min(y_back - y_min + 1.0 / (height * ppm), y_max - y_min), 0.0f);\n\n  float back_track = fabs(height / 2.0f - (new_y)) + (new_x) / width;\n  std::cout << \"back point = \" << new_x << \", \" << new_y << \" = \" << back_track << std::endl;\n  return (front_track + back_track) / 2.0f;\n}\n\nTEST_F(ARRobustCostTest, getCostmapCostSpeedMapTest)\n{\n  ARRobustCostParams params = cost.getParams();\n  params.desired_speed = -1;\n  cost.setParams(params);\n\n  cost.GPUSetup();\n  cost.loadTrackData(mppi::tests::robust_test_map_file);\n\n  std::vector<std::array<float, 9>> states;\n\n  std::array<float, 9> array = { 0.0 };\n  array[0] = 3.0;     // X\n  array[1] = 0.0;     // Y\n  array[2] = M_PI_2;  // Theta\n  array[3] = 0.0;     // Roll\n  array[4] = 2.0;     // Vx\n  array[5] = 1.0;     // Vy\n  
array[6] = 0.1;     // Yaw dot\n  array[7] = 0.5;     // steering\n  array[8] = 0.3;     // throttle\n  states.push_back(array);\n\n  std::vector<float> cost_results;\n\n  launchGetCostmapCostTestKernel(cost, states, cost_results);\n\n  EXPECT_FLOAT_EQ(cost_results[0], 11629.229);\n}\n\nTEST_F(ARRobustCostTest, getCostmapCostSpeedNoMapTest)\n{\n  cost.GPUSetup();\n  cost.loadTrackData(mppi::tests::robust_test_map_file);\n\n  std::vector<std::array<float, 9>> states;\n\n  std::array<float, 9> array = { 0.0 };\n  array[0] = 3.0;     // X\n  array[1] = 0.0;     // Y\n  array[2] = M_PI_2;  // Theta\n  array[3] = 0.0;     // Roll\n  array[4] = 2.0;     // Vx\n  array[5] = 1.0;     // Vy\n  array[6] = 0.1;     // Yaw dot\n  array[7] = 0.5;     // steering\n  array[8] = 0.3;     // throttle\n  states.push_back(array);\n\n  std::vector<float> cost_results;\n\n  launchGetCostmapCostTestKernel(cost, states, cost_results);\n\n  EXPECT_FLOAT_EQ(cost_results[0], 11349.729);\n}\n\nTEST_F(ARRobustCostTest, computeCostTest)\n{\n  cost.GPUSetup();\n  cost.loadTrackData(mppi::tests::robust_test_map_file);\n\n  checkGPURolloutCost(cost, 0.02);\n\n  // std::vector<std::array<float, 9>> states;\n\n  // std::array<float, 9> array = { 0.0 };\n  // array[0] = 3.0;     // X\n  // array[1] = 0.0;     // Y\n  // array[2] = M_PI_2;  // Theta\n  // array[3] = 0.0;     // Roll\n  // array[4] = 2.0;     // Vx\n  // array[5] = 1.0;     // Vy\n  // array[6] = 0.1;     // Yaw dot\n  // array[7] = 0.5;     // steering\n  // array[8] = 0.3;     // throttle\n  // states.push_back(array);\n\n  // std::vector<float> cost_results;\n\n  // launchComputeCostTestKernel<>(cost, states, cost_results);\n\n  // EXPECT_FLOAT_EQ(cost_results[0], 11349.729);\n}\n"
  },
  {
    "path": "tests/cost_functions/autorally_standard_cost_test.cu",
    "content": "//\n// Created by jason on 1/7/20.\n//\n\n#include <gtest/gtest.h>\n#include <mppi/cost_functions/autorally/ar_standard_cost.cuh>\n#include <kernel_tests/cost_functions/autorally/ar_standard_cost_kernel_test.cuh>\n\n// Auto-generated header file\n#include <autorally_test_map.h>\n\nTEST(ARStandardCost, Constructor)\n{\n  ARStandardCost cost;\n}\n\nTEST(ARStandardCost, BindStream)\n{\n  cudaStream_t stream;\n\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  ARStandardCost cost(stream);\n\n  EXPECT_EQ(cost.stream_, stream) << \"Stream binding failure.\";\n\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n}\n\nTEST(ARStandardCost, SetGetParamsHost)\n{\n  ARStandardCostParams params;\n  params.desired_speed = 25;\n  params.speed_coeff = 2;\n  params.track_coeff = 100;\n  params.max_slip_ang = 1.5;\n  params.slip_coeff = 1000;\n  params.track_slop = 0.2;\n  params.crash_coeff = 10000;\n  params.control_cost_coeff[0] = 20;\n  params.control_cost_coeff[1] = 10;\n  params.boundary_threshold = 10;\n  params.discount = 0.9;\n  params.grid_res = 2;\n\n  params.r_c1.x = 0;\n  params.r_c1.y = 1;\n  params.r_c1.z = 2;\n  params.r_c2.x = 3;\n  params.r_c2.y = 4;\n  params.r_c2.z = 5;\n  params.trs.x = 6;\n  params.trs.y = 7;\n  params.trs.z = 8;\n  ARStandardCost cost;\n\n  cost.setParams(params);\n  ARStandardCostParams result_params = cost.getParams();\n\n  EXPECT_FLOAT_EQ(params.desired_speed, result_params.desired_speed);\n  EXPECT_FLOAT_EQ(params.speed_coeff, result_params.speed_coeff);\n  EXPECT_FLOAT_EQ(params.track_coeff, result_params.track_coeff);\n  EXPECT_FLOAT_EQ(params.max_slip_ang, result_params.max_slip_ang);\n  EXPECT_FLOAT_EQ(params.slip_coeff, result_params.slip_coeff);\n  EXPECT_FLOAT_EQ(params.track_slop, result_params.track_slop);\n  EXPECT_FLOAT_EQ(params.crash_coeff, result_params.crash_coeff);\n  EXPECT_FLOAT_EQ(params.control_cost_coeff[0], result_params.control_cost_coeff[0]);\n  EXPECT_FLOAT_EQ(params.control_cost_coeff[1], 
result_params.control_cost_coeff[1]);\n  EXPECT_FLOAT_EQ(params.boundary_threshold, result_params.boundary_threshold);\n  EXPECT_FLOAT_EQ(params.discount, result_params.discount);\n  EXPECT_EQ(params.grid_res, result_params.grid_res);\n  EXPECT_FLOAT_EQ(params.r_c1.x, result_params.r_c1.x);\n  EXPECT_FLOAT_EQ(params.r_c1.y, result_params.r_c1.y);\n  EXPECT_FLOAT_EQ(params.r_c1.z, result_params.r_c1.z);\n  EXPECT_FLOAT_EQ(params.r_c2.x, result_params.r_c2.x);\n  EXPECT_FLOAT_EQ(params.r_c2.y, result_params.r_c2.y);\n  EXPECT_FLOAT_EQ(params.r_c2.z, result_params.r_c2.z);\n  EXPECT_FLOAT_EQ(params.trs.x, result_params.trs.x);\n  EXPECT_FLOAT_EQ(params.trs.y, result_params.trs.y);\n  EXPECT_FLOAT_EQ(params.trs.z, result_params.trs.z);\n}\n\nTEST(ARStandardCost, GPUSetupAndParamsToDeviceTest)\n{\n  // TODO make sre GPUMemstatus is false on the GPU so deallocation can be automatic\n  ARStandardCostParams params;\n  ARStandardCost cost;\n  params.desired_speed = 25;\n  params.speed_coeff = 2;\n  params.track_coeff = 100;\n  params.max_slip_ang = 1.5;\n  params.slip_coeff = 1000;\n  params.track_slop = 0.2;\n  params.crash_coeff = 10000;\n  params.control_cost_coeff[0] = 20;\n  params.control_cost_coeff[1] = 10;\n  params.boundary_threshold = 10;\n  params.discount = 0.9;\n  params.grid_res = 2;\n\n  params.r_c1.x = 0;\n  params.r_c1.y = 1;\n  params.r_c1.z = 2;\n  params.r_c2.x = 3;\n  params.r_c2.y = 4;\n  params.r_c2.z = 5;\n  params.trs.x = 6;\n  params.trs.y = 7;\n  params.trs.z = 8;\n  cost.setParams(params);\n\n  EXPECT_EQ(cost.GPUMemStatus_, false);\n  EXPECT_EQ(cost.cost_d_, nullptr);\n\n  cost.GPUSetup();\n\n  EXPECT_EQ(cost.GPUMemStatus_, true);\n  EXPECT_NE(cost.cost_d_, nullptr);\n\n  ARStandardCostParams result_params;\n  int width, height;\n  launchParameterTestKernel<>(cost, result_params, width, height);\n\n  EXPECT_FLOAT_EQ(params.desired_speed, result_params.desired_speed);\n  EXPECT_FLOAT_EQ(params.speed_coeff, result_params.speed_coeff);\n  
EXPECT_FLOAT_EQ(params.track_coeff, result_params.track_coeff);\n  EXPECT_FLOAT_EQ(params.max_slip_ang, result_params.max_slip_ang);\n  EXPECT_FLOAT_EQ(params.slip_coeff, result_params.slip_coeff);\n  EXPECT_FLOAT_EQ(params.track_slop, result_params.track_slop);\n  EXPECT_FLOAT_EQ(params.crash_coeff, result_params.crash_coeff);\n  EXPECT_FLOAT_EQ(params.control_cost_coeff[0], result_params.control_cost_coeff[0]);\n  EXPECT_FLOAT_EQ(params.control_cost_coeff[1], result_params.control_cost_coeff[1]);\n  EXPECT_FLOAT_EQ(params.boundary_threshold, result_params.boundary_threshold);\n  EXPECT_FLOAT_EQ(params.discount, result_params.discount);\n  EXPECT_EQ(params.grid_res, result_params.grid_res);\n  EXPECT_FLOAT_EQ(params.r_c1.x, result_params.r_c1.x);\n  EXPECT_FLOAT_EQ(params.r_c1.y, result_params.r_c1.y);\n  EXPECT_FLOAT_EQ(params.r_c1.z, result_params.r_c1.z);\n  EXPECT_FLOAT_EQ(params.r_c2.x, result_params.r_c2.x);\n  EXPECT_FLOAT_EQ(params.r_c2.y, result_params.r_c2.y);\n  EXPECT_FLOAT_EQ(params.r_c2.z, result_params.r_c2.z);\n  EXPECT_FLOAT_EQ(params.trs.x, result_params.trs.x);\n  EXPECT_FLOAT_EQ(params.trs.y, result_params.trs.y);\n  EXPECT_FLOAT_EQ(params.trs.z, result_params.trs.z);\n  // neither should be set by this sequence\n  EXPECT_EQ(width, -1);\n  EXPECT_EQ(height, -1);\n\n  params.desired_speed = 5;\n  params.r_c1.x = 4;\n  params.r_c1.y = 5;\n  params.r_c1.z = 6;\n  cost.setParams(params);\n\n  launchParameterTestKernel<>(cost, result_params, width, height);\n\n  EXPECT_FLOAT_EQ(params.desired_speed, result_params.desired_speed);\n  EXPECT_FLOAT_EQ(params.speed_coeff, result_params.speed_coeff);\n  EXPECT_FLOAT_EQ(params.track_coeff, result_params.track_coeff);\n  EXPECT_FLOAT_EQ(params.max_slip_ang, result_params.max_slip_ang);\n  EXPECT_FLOAT_EQ(params.slip_coeff, result_params.slip_coeff);\n  EXPECT_FLOAT_EQ(params.track_slop, result_params.track_slop);\n  EXPECT_FLOAT_EQ(params.crash_coeff, result_params.crash_coeff);\n  
EXPECT_FLOAT_EQ(params.control_cost_coeff[0], result_params.control_cost_coeff[0]);\n  EXPECT_FLOAT_EQ(params.control_cost_coeff[1], result_params.control_cost_coeff[1]);\n  EXPECT_FLOAT_EQ(params.boundary_threshold, result_params.boundary_threshold);\n  EXPECT_FLOAT_EQ(params.discount, result_params.discount);\n  EXPECT_EQ(params.grid_res, result_params.grid_res);\n  EXPECT_FLOAT_EQ(params.r_c1.x, result_params.r_c1.x);\n  EXPECT_FLOAT_EQ(params.r_c1.y, result_params.r_c1.y);\n  EXPECT_FLOAT_EQ(params.r_c1.z, result_params.r_c1.z);\n  EXPECT_FLOAT_EQ(params.r_c2.x, result_params.r_c2.x);\n  EXPECT_FLOAT_EQ(params.r_c2.y, result_params.r_c2.y);\n  EXPECT_FLOAT_EQ(params.r_c2.z, result_params.r_c2.z);\n  EXPECT_FLOAT_EQ(params.trs.x, result_params.trs.x);\n  EXPECT_FLOAT_EQ(params.trs.y, result_params.trs.y);\n  EXPECT_FLOAT_EQ(params.trs.z, result_params.trs.z);\n\n  // neither should be set by this sequence\n  EXPECT_EQ(width, -1);\n  EXPECT_EQ(height, -1);\n}\n\nTEST(ARStandardCost, coorTransformTest)\n{\n  float x, y, u, v, w;\n\n  ARStandardCostParams params;\n  ARStandardCost cost;\n\n  x = 0;\n  y = 10;\n\n  params.r_c1.x = 0;\n  params.r_c1.y = 1;\n  params.r_c1.z = 2;\n  params.r_c2.x = 3;\n  params.r_c2.y = 4;\n  params.r_c2.z = 5;\n  params.trs.x = 6;\n  params.trs.y = 7;\n  params.trs.z = 8;\n  cost.setParams(params);\n\n  cost.coorTransform(x, y, &u, &v, &w);\n\n  EXPECT_FLOAT_EQ(u, 36);\n  EXPECT_FLOAT_EQ(v, 47);\n  EXPECT_FLOAT_EQ(w, 58);\n}\n\nTEST(ARStandardCost, changeCostmapSizeTestValidInputs)\n{\n  ARStandardCost cost;\n  cost.changeCostmapSize(4, 8);\n\n  EXPECT_EQ(cost.getWidth(), 4);\n  EXPECT_EQ(cost.getHeight(), 8);\n  EXPECT_EQ(cost.getTrackCostCPU().capacity(), 4 * 8);\n\n  std::vector<float4> result;\n\n  // launchCheckCudaArray(result, cost.getCudaArray(), 4*8);\n\n  // EXPECT_EQ(result.size(), 4*8);\n  // TODO verify that cuda is properly allocating the memory\n  /*\n  for(int i = 0; i < 4*8; i++) {\n    EXPECT_FLOAT_EQ(result[i].x, 
0);\n    EXPECT_FLOAT_EQ(result[i].y, 0);\n    EXPECT_FLOAT_EQ(result[i].z, 0);\n    EXPECT_FLOAT_EQ(result[i].w, 0);\n  }\n   */\n}\n\nTEST(ARStandardCost, changeCostmapSizeTestFail)\n{\n  ARStandardCost cost;\n  cost.changeCostmapSize(4, 8);\n\n  EXPECT_EQ(cost.getWidth(), 4);\n  EXPECT_EQ(cost.getHeight(), 8);\n  EXPECT_EQ(cost.getTrackCostCPU().capacity(), 4 * 8);\n\n  testing::internal::CaptureStderr();\n  cost.changeCostmapSize(-1, -1);\n  std::string error_msg = testing::internal::GetCapturedStderr();\n\n  EXPECT_NE(error_msg, \"\");\n\n  EXPECT_EQ(cost.getWidth(), 4);\n  EXPECT_EQ(cost.getHeight(), 8);\n  EXPECT_EQ(cost.getTrackCostCPU().capacity(), 4 * 8);\n}\n\nTEST(ARStandardCost, clearCostmapTest)\n{\n  ARStandardCost cost;\n  cost.clearCostmapCPU(4, 8);\n\n  EXPECT_EQ(cost.getWidth(), 4);\n  EXPECT_EQ(cost.getHeight(), 8);\n  EXPECT_EQ(cost.getTrackCostCPU().capacity(), 4 * 8);\n\n  for (int i = 0; i < 4 * 8; i++)\n  {\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).x, 0);\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).y, 0);\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).z, 0);\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).w, 0);\n  }\n}\n\nTEST(ARStandardCost, clearCostmapTestDefault)\n{\n  ARStandardCost cost;\n  cost.clearCostmapCPU(4, 8);\n\n  EXPECT_EQ(cost.getWidth(), 4);\n  EXPECT_EQ(cost.getHeight(), 8);\n  EXPECT_EQ(cost.getTrackCostCPU().capacity(), 4 * 8);\n\n  for (int i = 0; i < 4 * 8; i++)\n  {\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).x, 0);\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).y, 0);\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).z, 0);\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).w, 0);\n  }\n\n  testing::internal::CaptureStderr();\n  cost.clearCostmapCPU();\n  std::string error_msg = testing::internal::GetCapturedStderr();\n\n  EXPECT_NE(error_msg, \"\");\n\n  EXPECT_EQ(cost.getWidth(), 4);\n  EXPECT_EQ(cost.getHeight(), 8);\n  EXPECT_EQ(cost.getTrackCostCPU().capacity(), 4 * 8);\n\n  for (int 
i = 0; i < 4 * 8; i++)\n  {\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).x, 0);\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).y, 0);\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).z, 0);\n    EXPECT_FLOAT_EQ(cost.getTrackCostCPU().at(i).w, 0);\n  }\n}\n\nTEST(ARStandardCost, clearCostmapTestDefaultFail)\n{\n  ARStandardCost cost;\n\n  testing::internal::CaptureStderr();\n  cost.clearCostmapCPU();\n  std::string error_msg = testing::internal::GetCapturedStderr();\n\n  EXPECT_NE(error_msg, \"\");\n\n  EXPECT_EQ(cost.getWidth(), -1);\n  EXPECT_EQ(cost.getHeight(), -1);\n  EXPECT_EQ(cost.getTrackCostCPU().capacity(), 0);\n}\n\nTEST(ARStandardCost, updateTransformCPUTest)\n{\n  ARStandardCost cost;\n  Eigen::MatrixXf R(3, 3);\n  Eigen::ArrayXf trs(3);\n  R(0, 0) = 1;\n  R(0, 1) = 2;\n  R(0, 2) = 3;\n\n  R(1, 0) = 4;\n  R(1, 1) = 5;\n  R(1, 2) = 6;\n\n  R(2, 0) = 7;\n  R(2, 1) = 8;\n  R(2, 2) = 9;\n\n  trs(0) = 10;\n  trs(1) = 11;\n  trs(2) = 12;\n\n  cost.updateTransform(R, trs);\n\n  /*\n   * Prospective transform matrix\n   * r_c1.x, r_c2.x, trs.x\n   * r_c1.y, r_c2.y, trs.y\n   * r_c1.z, r_c2.z, trs.z\n   */\n\n  EXPECT_FLOAT_EQ(cost.getParams().r_c1.x, 1);\n  EXPECT_FLOAT_EQ(cost.getParams().r_c2.x, 2);\n  EXPECT_FLOAT_EQ(cost.getParams().trs.x, 10);\n\n  EXPECT_FLOAT_EQ(cost.getParams().r_c1.y, 4);\n  EXPECT_FLOAT_EQ(cost.getParams().r_c2.y, 5);\n  EXPECT_FLOAT_EQ(cost.getParams().trs.y, 11);\n\n  EXPECT_FLOAT_EQ(cost.getParams().r_c1.z, 7);\n  EXPECT_FLOAT_EQ(cost.getParams().r_c2.z, 8);\n  EXPECT_FLOAT_EQ(cost.getParams().trs.z, 12);\n\n  EXPECT_EQ(cost.GPUMemStatus_, false);\n}\n\nTEST(ARStandardCost, updateTransformGPUTest)\n{\n  ARStandardCost cost;\n  cost.GPUSetup();\n  Eigen::MatrixXf R(3, 3);\n  Eigen::ArrayXf trs(3);\n  R(0, 0) = 1;\n  R(0, 1) = 2;\n  R(0, 2) = 3;\n\n  R(1, 0) = 4;\n  R(1, 1) = 5;\n  R(1, 2) = 6;\n\n  R(2, 0) = 7;\n  R(2, 1) = 8;\n  R(2, 2) = 9;\n\n  trs(0) = 10;\n  trs(1) = 11;\n  trs(2) = 12;\n\n  cost.updateTransform(R, 
trs);\n\n  /*\n   * Prospective transform matrix\n   * r_c1.x, r_c2.x, trs.x\n   * r_c1.y, r_c2.y, trs.y\n   * r_c1.z, r_c2.z, trs.z\n   */\n\n  std::vector<float3> results;\n  launchTransformTestKernel<>(results, cost);\n\n  EXPECT_EQ(results.size(), 3);\n\n  EXPECT_FLOAT_EQ(results.at(0).x, 1);\n  EXPECT_FLOAT_EQ(results.at(0).y, 4);\n  EXPECT_FLOAT_EQ(results.at(0).z, 7);\n\n  EXPECT_FLOAT_EQ(results.at(1).x, 2);\n  EXPECT_FLOAT_EQ(results.at(1).y, 5);\n  EXPECT_FLOAT_EQ(results.at(1).z, 8);\n\n  EXPECT_FLOAT_EQ(results.at(2).x, 10);\n  EXPECT_FLOAT_EQ(results.at(2).y, 11);\n  EXPECT_FLOAT_EQ(results.at(2).z, 12);\n\n  EXPECT_EQ(cost.GPUMemStatus_, true);\n}\n\nTEST(ARStandardCost, LoadTrackDataInvalidLocationTest)\n{\n  ARStandardCost cost;\n\n  testing::internal::CaptureStderr();\n  cost.loadTrackData(\"/null\");\n  std::string error_msg = testing::internal::GetCapturedStderr();\n\n  EXPECT_NE(error_msg, \"\");\n  EXPECT_NE(error_msg.find(\"/null\", 0), std::string::npos);\n}\n\nTEST(ARStandardCost, LoadTrackDataTest)\n{\n  ARStandardCost cost;\n  // TODO set parameters for cost map\n  cost.GPUSetup();\n\n  // load\n  std::vector<float4> costmap = cost.loadTrackData(mppi::tests::test_map_file);\n\n  Eigen::Matrix3f R = cost.getRotation();\n  Eigen::Array3f trs = cost.getTranslation();\n\n  EXPECT_FLOAT_EQ(costmap.at(0).x, 1);\n  EXPECT_FLOAT_EQ(costmap.at(0).y, 10);\n  EXPECT_FLOAT_EQ(costmap.at(0).z, 100);\n  EXPECT_FLOAT_EQ(costmap.at(0).w, 1000);\n  EXPECT_FLOAT_EQ(costmap.at(1).x, 2);\n  EXPECT_FLOAT_EQ(costmap.at(1).y, 20);\n  EXPECT_FLOAT_EQ(costmap.at(1).z, 200);\n  EXPECT_FLOAT_EQ(costmap.at(1).w, 2000);\n\n  // check transformation, should not have a rotation\n  EXPECT_FLOAT_EQ(R(0, 0), 1.0 / (10));\n  EXPECT_FLOAT_EQ(R(1, 1), 1.0 / (20));\n  EXPECT_FLOAT_EQ(R(2, 2), 1.0);\n\n  EXPECT_FLOAT_EQ(R(0, 1), 0);\n  EXPECT_FLOAT_EQ(R(0, 2), 0);\n  EXPECT_FLOAT_EQ(R(1, 0), 0);\n  EXPECT_FLOAT_EQ(R(1, 2), 0);\n  EXPECT_FLOAT_EQ(R(2, 0), 0);\n  
EXPECT_FLOAT_EQ(R(2, 1), 0);\n\n  EXPECT_FLOAT_EQ(trs(0), 0.5);\n  EXPECT_FLOAT_EQ(trs(1), 0.5);\n  EXPECT_FLOAT_EQ(trs(2), 1);\n\n  // check on the GPU\n  std::vector<float3> results;\n  launchTransformTestKernel<>(results, cost);\n\n  EXPECT_EQ(results.size(), 3);\n\n  // check diag\n  EXPECT_FLOAT_EQ(results.at(0).x, 1.0 / 10);\n  EXPECT_FLOAT_EQ(results.at(1).y, 1.0 / (20));\n\n  EXPECT_FLOAT_EQ(results.at(0).y, 0);\n  EXPECT_FLOAT_EQ(results.at(0).z, 0);\n  EXPECT_FLOAT_EQ(results.at(1).x, 0);\n  EXPECT_FLOAT_EQ(results.at(1).z, 0);\n\n  EXPECT_FLOAT_EQ(results.at(2).x, 0.5);\n  EXPECT_FLOAT_EQ(results.at(2).y, 0.5);\n  EXPECT_FLOAT_EQ(results.at(2).z, 1);\n\n  int counter = 0;\n  for (int i = 0; i < 2 * 10; i++)\n  {\n    for (int j = 0; j < 2 * 20; j++)\n    {\n      EXPECT_FLOAT_EQ(costmap.at(counter).x, counter + 1);\n      EXPECT_FLOAT_EQ(costmap.at(counter).y, (counter + 1) * 10);\n      EXPECT_FLOAT_EQ(costmap.at(counter).z, (counter + 1) * 100);\n      EXPECT_FLOAT_EQ(costmap.at(counter).w, (counter + 1) * 1000);\n      counter++;\n    }\n  }\n}\n\nTEST(ARStandardCost, costmapToTextureNoSizeTest)\n{\n  ARStandardCost cost;\n  cost.GPUSetup();\n\n  testing::internal::CaptureStderr();\n  cost.costmapToTexture();\n  std::string error_msg = testing::internal::GetCapturedStderr();\n\n  EXPECT_NE(error_msg, \"\");\n}\n\nTEST(ARStandardCost, costmapToTextureNoLoadTest)\n{\n  ARStandardCost cost;\n  cost.GPUSetup();\n\n  cost.changeCostmapSize(4, 8);\n\n  cost.costmapToTexture();\n\n  std::vector<float4> results;\n  std::vector<float2> query_points;\n  float2 point;\n  point.x = 0.0f;\n  point.y = 0.0f;\n  query_points.push_back(point);\n  point.x = 1.0f;\n  point.y = 0.0f;\n  query_points.push_back(point);\n\n  launchTextureTestKernel<>(cost, results, query_points);\n\n  EXPECT_EQ(query_points.size(), results.size());\n\n  EXPECT_FLOAT_EQ(results.at(0).x, 0);\n  EXPECT_FLOAT_EQ(results.at(0).y, 0);\n  EXPECT_FLOAT_EQ(results.at(0).z, 0);\n  
EXPECT_FLOAT_EQ(results.at(0).w, 0);\n\n  EXPECT_FLOAT_EQ(results.at(1).x, 0);\n  EXPECT_FLOAT_EQ(results.at(1).y, 0);\n  EXPECT_FLOAT_EQ(results.at(1).z, 0);\n  EXPECT_FLOAT_EQ(results.at(1).w, 0);\n}\n\nTEST(ARStandardCost, costmapToTextureLoadTest)\n{\n  ARStandardCost cost;\n  cost.GPUSetup();\n\n  Eigen::Matrix3f R;\n  Eigen::Array3f trs;\n\n  std::vector<float4> costmap = cost.loadTrackData(mppi::tests::test_map_file);\n  cost.costmapToTexture();\n\n  std::vector<float4> results;\n  std::vector<float2> query_points(8);\n  query_points[0].x = 0;\n  query_points[0].y = 0;\n  query_points[1].x = 0.1;\n  query_points[1].y = 0;\n  query_points[2].x = 0.5;\n  query_points[2].y = 0;\n  query_points[3].x = 1.0;\n  query_points[3].y = 0;\n  query_points[4].x = 0.0;\n  query_points[4].y = 0.5;\n  query_points[5].x = 0.0;\n  query_points[5].y = 1.0;\n  query_points[6].x = 0.5;\n  query_points[6].y = 0.5;\n  query_points[7].x = 1.0;\n  query_points[7].y = 1.0;\n\n  launchTextureTestKernel<>(cost, results, query_points);\n\n  EXPECT_EQ(query_points.size(), results.size());\n  // 0,0\n  EXPECT_FLOAT_EQ(results.at(0).x, 1);\n  EXPECT_FLOAT_EQ(results.at(0).y, 10);\n  EXPECT_FLOAT_EQ(results.at(0).z, 100);\n  EXPECT_FLOAT_EQ(results.at(0).w, 1000);\n  // 0.1, 0\n  EXPECT_FLOAT_EQ(results.at(1).x, 2);\n  EXPECT_FLOAT_EQ(results.at(1).y, 20);\n  EXPECT_FLOAT_EQ(results.at(1).z, 200);\n  EXPECT_FLOAT_EQ(results.at(1).w, 2000);\n  // 0.5, 0\n  EXPECT_FLOAT_EQ(results.at(2).x, 11);\n  EXPECT_FLOAT_EQ(results.at(2).y, 110);\n  EXPECT_FLOAT_EQ(results.at(2).z, 1100);\n  EXPECT_FLOAT_EQ(results.at(2).w, 11000);\n  // 1.0, 0\n  EXPECT_FLOAT_EQ(results.at(3).x, 20);\n  EXPECT_FLOAT_EQ(results.at(3).y, 200);\n  EXPECT_FLOAT_EQ(results.at(3).z, 2000);\n  EXPECT_FLOAT_EQ(results.at(3).w, 20000);\n  // 0.0, 0.5\n  EXPECT_FLOAT_EQ(results.at(4).x, 401);\n  EXPECT_FLOAT_EQ(results.at(4).y, 4010);\n  EXPECT_FLOAT_EQ(results.at(4).z, 40100);\n  EXPECT_FLOAT_EQ(results.at(4).w, 401000);\n  // 
0.0, 1.0\n  EXPECT_FLOAT_EQ(results.at(5).x, 781);\n  EXPECT_FLOAT_EQ(results.at(5).y, 7810);\n  EXPECT_FLOAT_EQ(results.at(5).z, 78100);\n  EXPECT_FLOAT_EQ(results.at(5).w, 781000);\n  // 0.5, 0.5\n  EXPECT_FLOAT_EQ(results.at(6).x, 411);\n  EXPECT_FLOAT_EQ(results.at(6).y, 4110);\n  EXPECT_FLOAT_EQ(results.at(6).z, 41100);\n  EXPECT_FLOAT_EQ(results.at(6).w, 411000);\n  // 1,1\n  EXPECT_FLOAT_EQ(results.at(7).x, 800);\n  EXPECT_FLOAT_EQ(results.at(7).y, 8000);\n  EXPECT_FLOAT_EQ(results.at(7).z, 80000);\n  EXPECT_FLOAT_EQ(results.at(7).w, 800000);\n}\n\nTEST(ARStandardCost, costmapToTextureTransformTest)\n{\n  ARStandardCost cost;\n  cost.GPUSetup();\n\n  std::vector<float4> costmap = cost.loadTrackData(mppi::tests::test_map_file);\n  cost.costmapToTexture();\n\n  std::vector<float4> results;\n  std::vector<float2> query_points(8);\n  query_points[0].x = -5;\n  query_points[0].y = -10;\n  query_points[1].x = -4;\n  query_points[1].y = -10;\n  query_points[2].x = 0;\n  query_points[2].y = -10;\n  query_points[3].x = 5;\n  query_points[3].y = -10;\n  query_points[4].x = -5;\n  query_points[4].y = 0;\n  query_points[5].x = -5;\n  query_points[5].y = 10;\n  query_points[6].x = 0;\n  query_points[6].y = 0;\n  query_points[7].x = 5;\n  query_points[7].y = 10;\n\n  launchTextureTransformTestKernel<>(cost, results, query_points);\n\n  EXPECT_EQ(query_points.size(), results.size());\n  // 0,0\n  EXPECT_FLOAT_EQ(results.at(0).x, 1);\n  EXPECT_FLOAT_EQ(results.at(0).y, 10);\n  EXPECT_FLOAT_EQ(results.at(0).z, 100);\n  EXPECT_FLOAT_EQ(results.at(0).w, 1000);\n  // 0.1, 0\n  EXPECT_FLOAT_EQ(results.at(1).x, 2);\n  EXPECT_FLOAT_EQ(results.at(1).y, 20);\n  EXPECT_FLOAT_EQ(results.at(1).z, 200);\n  EXPECT_FLOAT_EQ(results.at(1).w, 2000);\n  // 0.5, 0\n  EXPECT_FLOAT_EQ(results.at(2).x, 11);\n  EXPECT_FLOAT_EQ(results.at(2).y, 110);\n  EXPECT_FLOAT_EQ(results.at(2).z, 1100);\n  EXPECT_FLOAT_EQ(results.at(2).w, 11000);\n  // 1.0, 0\n  EXPECT_FLOAT_EQ(results.at(3).x, 20);\n  
EXPECT_FLOAT_EQ(results.at(3).y, 200);\n  EXPECT_FLOAT_EQ(results.at(3).z, 2000);\n  EXPECT_FLOAT_EQ(results.at(3).w, 20000);\n  // 0.0, 0.5\n  EXPECT_FLOAT_EQ(results.at(4).x, 401);\n  EXPECT_FLOAT_EQ(results.at(4).y, 4010);\n  EXPECT_FLOAT_EQ(results.at(4).z, 40100);\n  EXPECT_FLOAT_EQ(results.at(4).w, 401000);\n  // 0.0, 1.0\n  EXPECT_FLOAT_EQ(results.at(5).x, 781);\n  EXPECT_FLOAT_EQ(results.at(5).y, 7810);\n  EXPECT_FLOAT_EQ(results.at(5).z, 78100);\n  EXPECT_FLOAT_EQ(results.at(5).w, 781000);\n  // 0.5, 0.5\n  EXPECT_FLOAT_EQ(results.at(6).x, 411);\n  EXPECT_FLOAT_EQ(results.at(6).y, 4110);\n  EXPECT_FLOAT_EQ(results.at(6).z, 41100);\n  EXPECT_FLOAT_EQ(results.at(6).w, 411000);\n  // 1,1\n  EXPECT_FLOAT_EQ(results.at(7).x, 800);\n  EXPECT_FLOAT_EQ(results.at(7).y, 8000);\n  EXPECT_FLOAT_EQ(results.at(7).z, 80000);\n  EXPECT_FLOAT_EQ(results.at(7).w, 800000);\n}\n\nTEST(ARStandardCost, TerminalCostTest)\n{\n  ARStandardCost cost;\n\n  cost.GPUSetup();\n\n  std::vector<std::array<float, 7>> states;\n\n  std::array<float, 7> array = { 0.0 };\n  array[0] = 3.0;     // X\n  array[1] = 0.0;     // Y\n  array[2] = M_PI_2;  // Theta\n  array[3] = 0.0;     // Roll\n  array[4] = 2.0;     // Vx\n  array[5] = 1.0;     // Vy\n  array[6] = 0.1;     // Yaw dot\n  states.push_back(array);\n\n  std::vector<float> cost_results;\n\n  launchTerminalCostTestKernel<>(cost, states, cost_results);\n  EXPECT_FLOAT_EQ(cost_results[0], 0.0);\n}\n\nTEST(ARStandardCost, getSpeedCostTest)\n{\n  ARStandardCost cost;\n  ARStandardCostParams params;\n  params.desired_speed = 25;\n  params.speed_coeff = 10;\n  cost.setParams(params);\n\n  float state[7];\n  for (int i = 0; i < 7; i++)\n  {\n    state[i] = 0;\n  }\n  int crash = 0;\n  state[4] = 10;\n\n  float result = cost.getSpeedCost(state, &crash);\n\n  EXPECT_FLOAT_EQ(result, 15 * 15 * 10);\n\n  params.desired_speed = 0;\n  params.speed_coeff = 100;\n  cost.setParams(params);\n\n  result = cost.getSpeedCost(state, &crash);\n\n  
EXPECT_FLOAT_EQ(result, 10 * 10 * 100);\n}\n\nTEST(ARStandardCost, getStablizingCostTest)\n{\n  ARStandardCost cost;\n  ARStandardCostParams params;\n  params.slip_coeff = 25;\n  params.crash_coeff = 1000;\n  params.max_slip_ang = 0.5;\n  cost.setParams(params);\n\n  float state[7];\n  for (int i = 0; i < 7; i++)\n  {\n    state[i] = 0;\n  }\n  state[4] = 0.1;\n  state[5] = 0.0;\n  int crash = 0;\n\n  float result = cost.getStabilizingCost(state, &crash);\n\n  EXPECT_FLOAT_EQ(result, 0);\n  EXPECT_EQ(crash, 0);\n\n  state[5] = 0.01;\n\n  result = cost.getStabilizingCost(state, &crash);\n\n  EXPECT_FLOAT_EQ(result, 0.2483460072);\n  EXPECT_EQ(crash, 0);\n\n  state[5] = 0.2;\n\n  result = cost.getStabilizingCost(state, &crash);\n\n  EXPECT_FLOAT_EQ(result, 1030.6444);\n  EXPECT_EQ(crash, 0);\n\n  state[3] = 1.6;\n  state[5] = 0.0;\n\n  result = cost.getStabilizingCost(state, &crash);\n\n  EXPECT_FLOAT_EQ(result, 0.0);\n  EXPECT_EQ(crash, 1);\n\n  state[3] = -1.6;\n\n  result = cost.getStabilizingCost(state, &crash);\n\n  EXPECT_FLOAT_EQ(result, 0.0);\n  EXPECT_EQ(crash, 1);\n}\n\nTEST(ARStandardCost, getCrashCostTest)\n{\n  ARStandardCost cost;\n  ARStandardCostParams params;\n  params.crash_coeff = 10000;\n  cost.setParams(params);\n\n  float state[7];\n  for (int i = 0; i < 7; i++)\n  {\n    state[i] = 0;\n  }\n  int crash = 0;\n  state[4] = 10;\n\n  float result = cost.getCrashCost(state, &crash, 10);\n\n  EXPECT_FLOAT_EQ(result, 0);\n\n  crash = 1;\n  result = cost.getCrashCost(state, &crash, 10);\n\n  EXPECT_FLOAT_EQ(result, 10000);\n}\n\nfloat calculateStandardCostmapValue(ARStandardCost& cost, float3 state, int width, int height, float x_min, float x_max,\n                                    float y_min, float y_max, int ppm)\n{\n  float x_front = state.x + cost.FRONT_D * cosf(state.z);\n  float y_front = state.y + cost.FRONT_D * sinf(state.z);\n  float x_back = state.x + cost.BACK_D * cosf(state.z);\n  float y_back = state.y + cost.BACK_D * sinf(state.z);\n\n 
 float new_x = max(min(x_front - x_min, x_max - x_min), 0.0f);\n  float new_y = max(min(y_front - y_min, y_max - y_min), 0.0f);\n\n  float front = fabs(height / 2.0f - (new_y)) + (new_x) / width;\n  // std::cout << \"front point = \" << new_x << \", \" << new_y << \" = \" << front << std::endl;\n\n  new_x = max(min(x_back - x_min + 1.0 / (width * ppm), x_max - x_min), 0.0f);\n  new_y = max(min(y_back - y_min + 1.0 / (height * ppm), y_max - y_min), 0.0f);\n\n  float back = fabs(height / 2.0f - (new_y)) + (new_x) / width;\n  // std::cout << \"back point = \" << new_x << \", \" << new_y << \" = \" << back << std::endl;\n  return (front + back) / 2.0f;\n}\n\nTEST(ARStandardCost, getTrackCostTest)\n{\n  std::cout << \"==========================\\n\\n\" << std::endl;\n  ARStandardCost cost;\n  ARStandardCostParams params;\n  params.track_coeff = 1;\n  params.track_slop = 0.0;\n  params.boundary_threshold = 1.0;\n  cost.setParams(params);\n\n  cost.GPUSetup();\n\n  cost.loadTrackData(mppi::tests::standard_test_map_file);\n\n  std::vector<float3> states(4);\n  states[0].x = -13.5;\n  states[0].y = -10;\n  states[0].z = 0.0;  // theta\n  states[1].x = 0;\n  states[1].y = -10.0;\n  states[1].z = 0.0;  // theta\n  states[2].x = 0.0;\n  states[2].y = 0.0;\n  states[2].z = M_PI / 2;  // theta\n  states[3].x = 3.0;\n  states[3].y = 0.0;\n  states[3].z = M_PI_2;  // theta\n\n  std::vector<float> cost_results;\n  std::vector<int> crash_results;\n\n  launchTrackCostTestKernel<>(cost, states, cost_results, crash_results);\n\n  EXPECT_NEAR(cost_results[0],\n              params.track_coeff * calculateStandardCostmapValue(cost, states[0], 30, 30, -13, 17, -10, 20, 20), 0.001);\n  EXPECT_FLOAT_EQ(crash_results[0], 1.0);\n  EXPECT_NEAR(cost_results[1],\n              params.track_coeff * calculateStandardCostmapValue(cost, states[1], 30, 30, -13, 17, -10, 20, 20), 0.001);\n  EXPECT_FLOAT_EQ(crash_results[1], 1.0);\n  EXPECT_NEAR(cost_results[2],\n              params.track_coeff * 
calculateStandardCostmapValue(cost, states[2], 30, 30, -13, 17, -10, 20, 20), 0.1);\n  EXPECT_FLOAT_EQ(crash_results[2], 1.0);\n  EXPECT_NEAR(cost_results[3],\n              params.track_coeff * calculateStandardCostmapValue(cost, states[3], 30, 30, -13, 17, -10, 20, 20), 0.1);\n  EXPECT_FLOAT_EQ(crash_results[3], 1.0);\n}\n\nTEST(ARStandardCost, computeCostIndividualTest)\n{\n  ARStandardCost cost;\n  ARStandardCostParams params;\n  params.track_coeff = 0;\n  params.speed_coeff = 0;\n  params.crash_coeff = 0.0;\n  params.slip_coeff = 0.0;\n  params.control_cost_coeff[0] = 0.0;\n  params.control_cost_coeff[1] = 0.0;\n  params.discount = 0.9;\n  cost.setParams(params);\n\n  cost.GPUSetup();\n\n  cost.loadTrackData(mppi::tests::standard_test_map_file);\n  params = cost.getParams();\n\n  std::vector<std::array<float, 9>> states;\n\n  std::array<float, 9> array = { 0.0 };\n  array[0] = 3.0;     // X\n  array[1] = 0.0;     // Y\n  array[2] = M_PI_2;  // Theta\n  array[3] = 0.0;     // Roll\n  array[4] = 2.0;     // Vx\n  array[5] = 1.0;     // Vy\n  array[6] = 0.1;     // Yaw dot\n  array[7] = 0.5;     // steering\n  array[8] = 0.3;     // throttle\n  states.push_back(array);\n\n  std::vector<float> cost_results;\n  std::vector<int> timestep;\n  timestep.push_back(1);\n  std::vector<int> crash;\n  crash.push_back(0);\n\n  launchComputeCostTestKernel<>(cost, states, cost_results, timestep, crash);\n  EXPECT_FLOAT_EQ(cost_results[0], 0.0);\n\n  params.speed_coeff = 4.25;\n  cost.setParams(params);\n\n  float speed_cost = powf(4.0, 2) * 4.25;\n  launchComputeCostTestKernel<>(cost, states, cost_results, timestep, crash);\n  EXPECT_FLOAT_EQ(cost_results[0], speed_cost);\n\n  params.speed_coeff = 0.0;\n  params.slip_coeff = 10;\n  cost.setParams(params);\n\n  float slip_cost = powf(-atanf(1.0 / 2.0), 2) * 10;\n  launchComputeCostTestKernel<>(cost, states, cost_results, timestep, crash);\n  EXPECT_FLOAT_EQ(cost_results[0], slip_cost);\n\n  params.slip_coeff = 0.0;\n  
params.track_coeff = 200.0;\n  cost.setParams(params);\n\n  float track_cost = 1116.3333;\n  launchComputeCostTestKernel<>(cost, states, cost_results, timestep, crash);\n  EXPECT_FLOAT_EQ(cost_results[0], track_cost);\n\n  params.track_coeff = 0.0;\n  params.crash_coeff = 10000;\n  cost.setParams(params);\n\n  float crash_cost = 9000;\n  launchComputeCostTestKernel<>(cost, states, cost_results, timestep, crash);\n  EXPECT_FLOAT_EQ(cost_results[0], crash_cost);\n\n  params.speed_coeff = 4.25;\n  params.track_coeff = 200;\n  params.slip_coeff = 10;\n  params.crash_coeff = 10000;\n  cost.setParams(params);\n\n  launchComputeCostTestKernel<>(cost, states, cost_results, timestep, crash);\n  EXPECT_FLOAT_EQ(cost_results[0], speed_cost + slip_cost + track_cost + crash_cost);\n\n  timestep[0] = 4;\n\n  launchComputeCostTestKernel<>(cost, states, cost_results, timestep, crash);\n  EXPECT_FLOAT_EQ(cost_results[0], speed_cost + slip_cost + track_cost + powf(0.9, timestep[0]) * params.crash_coeff);\n}\n\nTEST(ARStandardCost, computeCostOverflowTest)\n{\n  ARStandardCost cost;\n  ARStandardCostParams params;\n  params.track_coeff = 0;\n  params.speed_coeff = 10;\n  params.crash_coeff = 0.0;\n  params.slip_coeff = 0.0;\n  params.control_cost_coeff[0] = 0.0;\n  params.control_cost_coeff[1] = 0.0;\n  params.desired_speed = ARStandardCost::MAX_COST_VALUE;\n  cost.setParams(params);\n\n  cost.GPUSetup();\n\n  cost.loadTrackData(mppi::tests::standard_test_map_file);\n  params = cost.getParams();\n\n  std::vector<std::array<float, 9>> states;\n\n  std::array<float, 9> array = { 0.0 };\n  array[0] = 3.0;     // X\n  array[1] = 0.0;     // Y\n  array[2] = M_PI_2;  // Theta\n  array[3] = 0.0;     // Roll\n  array[4] = 2.0;     // Vx\n  array[5] = 1.0;     // Vy\n  array[6] = 0.1;     // Yaw dot\n  array[7] = 0.5;     // steering\n  array[8] = 0.3;     // throttle\n  states.push_back(array);\n\n  std::vector<float> cost_results;\n\n  std::vector<int> timestep;\n  timestep.push_back(1);\n  
std::vector<int> crash;\n  crash.push_back(0);\n\n  launchComputeCostTestKernel<>(cost, states, cost_results, timestep, crash);\n  EXPECT_FLOAT_EQ(cost_results[0], ARStandardCost::MAX_COST_VALUE);\n\n  cost_results[0] = 0;\n\n  params.desired_speed = NAN;\n  cost.setParams(params);\n  launchComputeCostTestKernel<>(cost, states, cost_results, timestep, crash);\n  EXPECT_FLOAT_EQ(cost_results[0], ARStandardCost::MAX_COST_VALUE);\n}\n\nTEST(ARStandardCost, matchAutoRallyTest)\n{\n  ARStandardCost cost;\n  ARStandardCostParams params;\n  params.desired_speed = 6.0;\n  params.track_coeff = 200;\n  params.speed_coeff = 4.25;\n  params.crash_coeff = 10000.0;\n  params.max_slip_ang = 1.25;\n  params.slip_coeff = 10.0;\n  params.track_slop = 0.0;\n  params.boundary_threshold = 0.65;\n  params.control_cost_coeff[0] = 0.0;\n  params.control_cost_coeff[1] = 0.0;\n  params.discount = 0.9;\n  cost.setParams(params);\n\n  cost.GPUSetup();\n\n  cost.loadTrackData(mppi::tests::ccrf_map);\n\n  params = cost.getParams();\n\n  std::vector<std::array<float, 9>> states;\n  std::vector<int> timesteps;\n  std::vector<int> crash;\n\n  std::array<float, 9> array = { 0.0 };\n\n  // input state 19.113796 -36.497066 7.431655 0.055569 4.922609 0.069374 -1.785045\n  // input control -0.134569 -0.485720\n  // timestep 1\n  // crash = 0\n  // cost returned 122.039955\n  array = { 19.113796, -36.497066, 7.431655, 0.055569, 4.922609, 0.069374, -1.785045, -0.134569, -0.485720 };\n  states.push_back(array);\n  timesteps.push_back(1);\n  crash.push_back(0);\n\n  // input state 19.076693 -36.588978 7.397079 0.066307 4.951416 0.068694 -1.694298\n  // input control 0.183665 -0.441535\n  // timestep 1\n  // cost returned 128.190033\n  // crash = 0\n  array = { 19.076693, -36.588978, 7.397079, 0.066307, 4.951416, 0.068694, -1.694298, 0.183665, -0.441535 };\n  states.push_back(array);\n  timesteps.push_back(1);\n  crash.push_back(0);\n\n  // input state -1.470056 -13.121619 1.015522 -0.076247 5.938981 
0.375569 2.230900\n  // input control 0.273864 -0.176446\n  // timestep, crash 1 0\n  // cost returned 9137.753906\n  array = { -1.470056, -13.121619, 1.015522, -0.076247, 5.938981, 0.375569, 2.230900, 0.273864, -0.176446 };\n  states.push_back(array);\n  timesteps.push_back(1);\n  crash.push_back(0);\n\n  // input state -1.413821 -13.016726 0.970904 -0.079068 5.893999 0.607982 2.259659\n  // input control 0.225313 -0.035700\n  // timestep, crash 2 1\n  // cost returned 8402.457031\n  array = { -1.413821, -13.016726, 0.970904, -0.079068, 5.893999, 0.607982, 2.259659, 0.225313, -0.035700 };\n  states.push_back(array);\n  timesteps.push_back(2);\n  crash.push_back(1);\n\n  // input state 7.273710 -10.255844 -0.030983 -0.067543 4.353069 -0.036942 -0.177069\n  // input control -0.038980 -0.335935\n  // timestep, crash 99 1\n  // cost returned 2857.133545\n  array = { 7.273710, -10.255844, -0.030983, -0.067543, 4.353069, -0.036942, -0.177069, -0.038980, -0.335935 };\n  states.push_back(array);\n  timesteps.push_back(99);\n  crash.push_back(1);\n\n  std::vector<float> cost_results;\n\n  launchComputeCostTestKernel<>(cost, states, cost_results, timesteps, crash);\n  EXPECT_FLOAT_EQ(cost_results[0], 122.039955);\n  EXPECT_EQ(crash[0], 0);\n  EXPECT_FLOAT_EQ(cost_results[1], 128.190033);\n  EXPECT_EQ(crash[1], 0);\n  EXPECT_FLOAT_EQ(cost_results[2], 9137.753906);\n  EXPECT_EQ(crash[2], 1);\n  EXPECT_FLOAT_EQ(cost_results[3], 8402.457031);\n  EXPECT_EQ(crash[3], 1);\n  EXPECT_FLOAT_EQ(cost_results[4], 2857.133545);\n  EXPECT_EQ(crash[4], 1);\n}\n"
  },
  {
    "path": "tests/cost_functions/cartpole_quadratic_cost_test.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/cost_functions/cartpole/cartpole_quadratic_cost.cuh>\n#include <kernel_tests/cost_functions/cartpole/cartpole_quadratic_cost_kernel_test.cuh>\n#include <array>\n\nTEST(CartpoleQuadraticCost, Constructor)\n{\n  CartpoleQuadraticCost cost;\n\n  // Test passes if the object is constructed without runetime error.\n}\n\nTEST(CartpoleQuadraticCost, BindStream)\n{\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n  CartpoleQuadraticCost cost(stream);\n  EXPECT_EQ(cost.stream_, stream) << \"Stream binding failure.\";\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n}\n\nTEST(CartpoleQuadraticCost, GPUMemoryNull)\n{\n  CartpoleQuadraticCost cost;\n  ASSERT_EQ(cost.cost_d_, nullptr);\n}\n\nTEST(CartpoleQuadraticCost, GPUSetup)\n{\n  CartpoleQuadraticCost cost;\n  cost.GPUSetup();\n\n  ASSERT_FALSE(cost.cost_d_ == nullptr);\n}\n\nTEST(CartpoleQuadraticCost, SetParamsCPU)\n{\n  CartpoleQuadraticCostParams new_params;\n  new_params.cart_position_coeff = 3;\n  new_params.pole_angle_coeff = 3;\n  new_params.cart_velocity_coeff = 3;\n  new_params.pole_angular_velocity_coeff = 3;\n  new_params.control_cost_coeff[0] = 5;\n  new_params.terminal_cost_coeff = 20;\n  new_params.desired_terminal_state[0] = 3;\n  new_params.desired_terminal_state[1] = 2;\n  new_params.desired_terminal_state[2] = 3.14;\n  new_params.desired_terminal_state[3] = 1;\n\n  CartpoleQuadraticCost cost;\n\n  cost.setParams(new_params);\n  auto current_params = cost.getParams();\n\n  EXPECT_FLOAT_EQ(new_params.cart_position_coeff, current_params.cart_position_coeff);\n  EXPECT_FLOAT_EQ(new_params.pole_angle_coeff, current_params.pole_angle_coeff);\n  EXPECT_FLOAT_EQ(new_params.cart_velocity_coeff, current_params.cart_velocity_coeff);\n  EXPECT_FLOAT_EQ(new_params.pole_angular_velocity_coeff, current_params.pole_angular_velocity_coeff);\n  EXPECT_FLOAT_EQ(new_params.control_cost_coeff[0], current_params.control_cost_coeff[0]);\n  
EXPECT_FLOAT_EQ(new_params.terminal_cost_coeff, current_params.terminal_cost_coeff);\n  EXPECT_FLOAT_EQ(new_params.desired_terminal_state[0], current_params.desired_terminal_state[0]);\n  EXPECT_FLOAT_EQ(new_params.desired_terminal_state[1], current_params.desired_terminal_state[1]);\n  EXPECT_FLOAT_EQ(new_params.desired_terminal_state[2], current_params.desired_terminal_state[2]);\n  EXPECT_FLOAT_EQ(new_params.desired_terminal_state[3], current_params.desired_terminal_state[3]);\n}\n\nTEST(CartpoleQuadraticCost, SetParamsGPU)\n{\n  CartpoleQuadraticCost cost;\n  cost.GPUSetup();\n\n  CartpoleQuadraticCostParams new_params;\n  new_params.cart_position_coeff = 5;\n  new_params.pole_angle_coeff = 6;\n  new_params.cart_velocity_coeff = 7;\n  new_params.pole_angular_velocity_coeff = 8;\n  new_params.control_cost_coeff[0] = 9;\n  new_params.terminal_cost_coeff = 2000;\n  new_params.desired_terminal_state[0] = 3;\n  new_params.desired_terminal_state[1] = 2;\n  new_params.desired_terminal_state[2] = 3.14;\n  new_params.desired_terminal_state[3] = 1;\n\n  CartpoleQuadraticCostParams gpu_params;\n\n  cost.setParams(new_params);\n\n  if (cost.GPUMemStatus_)\n  {\n    // Launch kernel to grab parameters from the GPU\n    launchParameterTestKernel(cost, gpu_params);\n  }\n  else\n  {\n    FAIL() << \"GPU Setup has not been called or is not functioning.\";\n  }\n\n  EXPECT_FLOAT_EQ(new_params.cart_position_coeff, gpu_params.cart_position_coeff);\n  EXPECT_FLOAT_EQ(new_params.pole_angle_coeff, gpu_params.pole_angle_coeff);\n  EXPECT_FLOAT_EQ(new_params.cart_velocity_coeff, gpu_params.cart_velocity_coeff);\n  EXPECT_FLOAT_EQ(new_params.pole_angular_velocity_coeff, gpu_params.pole_angular_velocity_coeff);\n  EXPECT_FLOAT_EQ(new_params.control_cost_coeff[0], gpu_params.control_cost_coeff[0]);\n  EXPECT_FLOAT_EQ(new_params.terminal_cost_coeff, gpu_params.terminal_cost_coeff);\n  EXPECT_FLOAT_EQ(new_params.desired_terminal_state[0], gpu_params.desired_terminal_state[0]);\n  
EXPECT_FLOAT_EQ(new_params.desired_terminal_state[1], gpu_params.desired_terminal_state[1]);\n  EXPECT_FLOAT_EQ(new_params.desired_terminal_state[2], gpu_params.desired_terminal_state[2]);\n  EXPECT_FLOAT_EQ(new_params.desired_terminal_state[3], gpu_params.desired_terminal_state[3]);\n}\nTEST(CartpoleQuadraticCost, ComputeStateCost)\n{\n  CartpoleQuadraticCost cost;\n\n  CartpoleQuadraticCost::output_array state;\n  state << 1, 2, 3, 4;\n\n  float cost_compute = cost.computeStateCost(state);\n  float cost_known =\n      (state[0] - cost.getParams().desired_terminal_state[0]) *\n          (state(0) - cost.getParams().desired_terminal_state[0]) * cost.getParams().cart_position_coeff +\n      (state[1] - cost.getParams().desired_terminal_state[1]) *\n          (state(1) - cost.getParams().desired_terminal_state[1]) * cost.getParams().cart_velocity_coeff +\n      (state[2] - cost.getParams().desired_terminal_state[2]) *\n          (state(2) - cost.getParams().desired_terminal_state[2]) * cost.getParams().pole_angle_coeff +\n      (state[3] - cost.getParams().desired_terminal_state[3]) *\n          (state(3) - cost.getParams().desired_terminal_state[3]) * cost.getParams().pole_angular_velocity_coeff;\n\n  ASSERT_EQ(cost_known, cost_compute);\n}\n\nTEST(CartpoleQuadraticCost, ComputeRunningCost)\n{\n  CartpoleQuadraticCost cost;\n\n  CartpoleQuadraticCost::output_array state;\n  CartpoleQuadraticCost::control_array control, noise, std_dev;\n  state << 5, 3, 2, 4;\n  control << 6;\n  noise << 0.3;\n  std_dev << 2;\n  int timestep = 0;\n  int crash_status[1] = { 0 };\n\n  float cost_compute = cost.computeRunningCost(state, control, timestep, crash_status);\n  float cost_known =\n      (state[0] - cost.getParams().desired_terminal_state[0]) *\n          (state(0) - cost.getParams().desired_terminal_state[0]) * cost.getParams().cart_position_coeff +\n      (state[1] - cost.getParams().desired_terminal_state[1]) *\n          (state(1) - 
cost.getParams().desired_terminal_state[1]) * cost.getParams().cart_velocity_coeff +\n      (state[2] - cost.getParams().desired_terminal_state[2]) *\n          (state(2) - cost.getParams().desired_terminal_state[2]) * cost.getParams().pole_angle_coeff +\n      (state[3] - cost.getParams().desired_terminal_state[3]) *\n          (state(3) - cost.getParams().desired_terminal_state[3]) * cost.getParams().pole_angular_velocity_coeff;\n\n  cost_known = cost_known;\n  ASSERT_EQ(cost_known, cost_compute);\n}\n\nTEST(CartpoleQuadraticCost, ComputeTerminalCost)\n{\n  CartpoleQuadraticCost cost;\n\n  std::array<float, 4> state = { 2.f, 3.f, 7.f, 43.f };\n\n  // float cost_compute = cost.terminalCost(state);\n  float cost_compute = 0.0f;\n  float cost_known =\n      ((state[0] - cost.getParams().desired_terminal_state[0]) *\n           (state[0] - cost.getParams().desired_terminal_state[0]) * cost.getParams().cart_position_coeff +\n       (state[1] - cost.getParams().desired_terminal_state[1]) *\n           (state[1] - cost.getParams().desired_terminal_state[1]) * cost.getParams().cart_velocity_coeff +\n       (state[2] - cost.getParams().desired_terminal_state[2]) *\n           (state[2] - cost.getParams().desired_terminal_state[2]) * cost.getParams().pole_angle_coeff +\n       (state[3] - cost.getParams().desired_terminal_state[3]) *\n           (state[3] - cost.getParams().desired_terminal_state[3]) * cost.getParams().pole_angular_velocity_coeff) *\n      cost.getParams().terminal_cost_coeff;\n  ASSERT_EQ(cost_known, cost_compute);\n}\n"
  },
  {
    "path": "tests/cost_functions/general_cost_test.cu",
    "content": "#include <gtest/gtest.h>\n\n#include <kernel_tests/cost_functions/cost_generic_kernel_tests.cuh>\n#include <mppi/cost_functions/cartpole/cartpole_quadratic_cost.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_circle_cost.cuh>\n#include <mppi/cost_functions/quadrotor/quadrotor_quadratic_cost.cuh>\n\ntemplate <class COST_T>\n__global__ void copyParamsOnGPU(COST_T* cost_d, typename COST_T::COST_PARAMS_T* params_d)\n{\n  *params_d = cost_d->getParams();\n}\n\ntemplate <class COST_T>\nclass GeneralCostTest : public ::testing::Test\n{\npublic:\n  COST_T cost;\n\nprotected:\n  void SetUp() override\n  {\n  }\n\n  void TearDown() override\n  {\n  }\n};\n\nusing DIFFERENT_COSTS = ::testing::Types<DoubleIntegratorCircleCost, CartpoleQuadraticCost>;\n\nTYPED_TEST_SUITE(GeneralCostTest, DIFFERENT_COSTS);\n\nTYPED_TEST(GeneralCostTest, GPUSetup)\n{\n  ASSERT_EQ(this->cost.cost_d_, nullptr);\n  this->cost.GPUSetup();\n  ASSERT_NE(this->cost.cost_d_, nullptr);\n}\n\nTYPED_TEST(GeneralCostTest, VerifyGPUParams)\n{\n  using PARAMS_T = typename TypeParam::COST_PARAMS_T;\n  this->cost.GPUSetup();\n\n  PARAMS_T* params_d;\n  HANDLE_ERROR(cudaMalloc((void**)&params_d, sizeof(PARAMS_T)));\n\n  PARAMS_T params_gpu_after, params_gpu_before;\n  copyParamsOnGPU<TypeParam><<<1, 1>>>(this->cost.cost_d_, params_d);\n  HANDLE_ERROR(cudaMemcpy(&params_gpu_before, params_d, sizeof(PARAMS_T), cudaMemcpyDeviceToHost));\n\n  PARAMS_T cost_params = this->cost.getParams();\n  cost_params.control_cost_coeff[0] = 27;\n  cost_params.discount = 0.97;\n  this->cost.setParams(cost_params);\n  copyParamsOnGPU<TypeParam><<<1, 1>>>(this->cost.cost_d_, params_d);\n  HANDLE_ERROR(cudaMemcpy(&params_gpu_after, params_d, sizeof(PARAMS_T), cudaMemcpyDeviceToHost));\n\n  // Ensure params before params update are different\n  ASSERT_NE(params_gpu_before.discount, cost_params.discount);\n  ASSERT_NE(params_gpu_before.control_cost_coeff[0], cost_params.control_cost_coeff[0]);\n  
// Ensure params after params update are the same\n  ASSERT_EQ(params_gpu_after.discount, cost_params.discount);\n  ASSERT_EQ(params_gpu_after.control_cost_coeff[0], cost_params.control_cost_coeff[0]);\n}\n\nTYPED_TEST(GeneralCostTest, CPUvsGPURolloutCost)\n{\n  checkGPURolloutCost<TypeParam>(this->cost, 0.01);\n}\n"
  },
  {
    "path": "tests/cost_functions/general_quadratic_costs.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/cost_functions/quadratic_cost/quadratic_cost.cuh>\n#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n\ntypedef QuadraticCost<DoubleIntegratorDynamics> DIQuadCost;\n\nTEST(DIQuadraticCost, Constructor)\n{\n  DIQuadCost();\n}\n\ntemplate <class COST_T>\n__global__ void computeCostKernel(COST_T* cost, float* s, int* t, float* result)\n{\n  int crash = 0;\n  *result = cost->computeStateCost(s, *t, nullptr, &crash);\n}\n\nclass DITargetQuadraticCost : public testing::Test\n{\npublic:\n  using DYN = DoubleIntegratorDynamics;\n  using COST = QuadraticCost<DYN>;\n  DIQuadCost cost;\n  DIQuadCost::COST_PARAMS_T cost_params;\n  DYN::output_array s;\n  dim3 dimBlock;\n  dim3 dimGrid;\n  void SetUp() override\n  {\n    cost_params = cost.getParams();\n    cost_params.s_goal[0] = 1;\n    cost_params.s_goal[1] = -5;\n    cost_params.s_goal[2] = 0;\n    cost_params.s_goal[3] = 0.5;\n    cost.setParams(cost_params);\n\n    s << 0, 0, 0, 0;\n    dimBlock = dim3(10, 11, 5);\n    dimGrid = dim3(1, 1, 1);\n  }\n};\n\nclass DITrajQuadraticCost : public testing::Test\n{\npublic:\n  using DYN = DoubleIntegratorDynamics;\n  static const int TIME_HORIZON = 5;\n  using COST = QuadraticCostTrajectory<DYN, TIME_HORIZON>;\n  COST cost;\n  COST::COST_PARAMS_T cost_params;\n  float s_traj[28] = { 1, -5, 0, 0.5, 2, -5.5, 0, 1, 5, 5, 1, 10, 1, 1, 1, 1, 2, 3, 4, 5, 11, -5, 3, 8, 8, 8, 8, 8 };\n  DYN::output_array s;\n  // GPU Variables\n  dim3 dimBlock;\n  dim3 dimGrid;\n  float* s_dev;\n  float* resulting_cost_d;\n  int* time_d;\n  cudaStream_t stream;\n\n  void SetUp() override\n  {\n    cost_params = cost.getParams();\n    for (int i = 0; i < TIME_HORIZON * DYN::OUTPUT_DIM; i++)\n    {\n      cost_params.s_goal[i] = s_traj[i];\n    }\n    // cost_params.s_goal[0] = 1;\n    // cost_params.s_goal[1] = -5;\n    // cost_params.s_goal[2] = 0;\n    // cost_params.s_goal[3] = 0.5;\n    cost.setParams(cost_params);\n\n    s << 0, 0, 0, 
0;\n\n    dimBlock = dim3(10, 11, 5);\n    dimGrid = dim3(1, 1, 1);\n    HANDLE_ERROR(cudaMalloc((void**)&s_dev, sizeof(float) * DYN::OUTPUT_DIM));\n    HANDLE_ERROR(cudaMalloc((void**)&resulting_cost_d, sizeof(float)));\n    HANDLE_ERROR(cudaMalloc((void**)&time_d, sizeof(int)));\n\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n    cost.bindToStream(stream);\n    cost.GPUSetup();\n  }\n\n  void TearDown() override\n  {\n    HANDLE_ERROR(cudaFree(s_dev));\n    HANDLE_ERROR(cudaFree(resulting_cost_d));\n    HANDLE_ERROR(cudaFree(time_d));\n  }\n};\n\nTEST_F(DITargetQuadraticCost, SimpleStateCostCPU)\n{\n  float state_cost = cost.computeStateCost(s);\n  ASSERT_FLOAT_EQ(state_cost, 26.25);\n}\n\nTEST_F(DITargetQuadraticCost, LateStateCostCPU)\n{\n  float state_cost = cost.computeStateCost(s, 10);\n  ASSERT_FLOAT_EQ(state_cost, 26.25);\n}\n\nTEST_F(DITargetQuadraticCost, WeightStateCostCPU)\n{\n  cost_params.s_coeffs[0] = 10;\n  cost_params.s_coeffs[1] = 5;\n  cost_params.s_coeffs[2] = 7;\n  cost_params.s_coeffs[3] = .1;\n  cost.setParams(cost_params);\n  float state_cost = cost.computeStateCost(s);\n  ASSERT_FLOAT_EQ(state_cost, 10 + 5 * 5 * 5 + 0 + 0.025);\n}\n\nTEST_F(DITargetQuadraticCost, LipschitzConstant)\n{\n  cost_params.s_coeffs[0] = -10;\n  cost_params.s_coeffs[1] = 5;\n  cost_params.s_coeffs[2] = 7;\n  cost_params.s_coeffs[3] = .1;\n  cost.setParams(cost_params);\n\n  float lipschitz_constant = cost.getLipshitzConstantCost();\n  ASSERT_FLOAT_EQ(lipschitz_constant, 20);\n}\n\nTEST_F(DITargetQuadraticCost, StateCostGPU)\n{\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n  cost.bindToStream(stream);\n  cost.GPUSetup();\n  int time = 0;\n  float result_cost_h = -1;\n\n  // Setup GPU params\n  float* s_dev;\n  float* resulting_cost_d;\n  int* time_d;\n  HANDLE_ERROR(cudaMalloc((void**)&s_dev, sizeof(float) * DYN::OUTPUT_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&resulting_cost_d, sizeof(float)));\n  HANDLE_ERROR(cudaMalloc((void**)&time_d, 
sizeof(int)));\n\n  HANDLE_ERROR(cudaMemcpyAsync(s_dev, s.data(), sizeof(float) * DYN::OUTPUT_DIM, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(time_d, &time, sizeof(int), cudaMemcpyHostToDevice, stream));\n  computeCostKernel<COST><<<dimGrid, dimBlock, 0, stream>>>(cost.cost_d_, s_dev, time_d, resulting_cost_d);\n\n  HANDLE_ERROR(cudaMemcpyAsync(&result_cost_h, resulting_cost_d, sizeof(float), cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n  ASSERT_FLOAT_EQ(result_cost_h, 26.25);\n}\n\nTEST_F(DITargetQuadraticCost, LateStateCostGPU)\n{\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n  cost.bindToStream(stream);\n  cost.GPUSetup();\n  int time = 10;\n  float result_cost_h = -1;\n\n  // Setup GPU params\n  float* s_dev;\n  float* resulting_cost_d;\n  int* time_d;\n  HANDLE_ERROR(cudaMalloc((void**)&s_dev, sizeof(float) * DYN::OUTPUT_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&resulting_cost_d, sizeof(float)));\n  HANDLE_ERROR(cudaMalloc((void**)&time_d, sizeof(int)));\n\n  HANDLE_ERROR(cudaMemcpyAsync(s_dev, s.data(), sizeof(float) * DYN::OUTPUT_DIM, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(time_d, &time, sizeof(int), cudaMemcpyHostToDevice, stream));\n  computeCostKernel<COST><<<dimGrid, dimBlock, 0, stream>>>(cost.cost_d_, s_dev, time_d, resulting_cost_d);\n\n  HANDLE_ERROR(cudaMemcpyAsync(&result_cost_h, resulting_cost_d, sizeof(float), cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n  ASSERT_FLOAT_EQ(result_cost_h, 26.25);\n}\n\nTEST_F(DITrajQuadraticCost, MidTrajStateCostGPU)\n{\n  int time = 2;\n  float result_cost_h = -1;\n\n  // Setup GPU params\n  HANDLE_ERROR(cudaMemcpyAsync(s_dev, s.data(), sizeof(float) * DYN::OUTPUT_DIM, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(time_d, &time, sizeof(int), cudaMemcpyHostToDevice, stream));\n  computeCostKernel<COST><<<dimGrid, dimBlock, 0, 
stream>>>(cost.cost_d_, s_dev, time_d, resulting_cost_d);\n\n  HANDLE_ERROR(cudaMemcpyAsync(&result_cost_h, resulting_cost_d, sizeof(float), cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n  float ground_truth = powf(cost_params.s_goal[8], 2) + powf(cost_params.s_goal[9], 2) +\n                       powf(cost_params.s_goal[10], 2) + powf(cost_params.s_goal[11], 2);\n\n  ASSERT_FLOAT_EQ(result_cost_h, ground_truth);\n}\n"
  },
  {
    "path": "tests/cost_functions/quadrotor_map_cost_test.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/cost_functions/quadrotor/quadrotor_map_cost.cuh>\n#include <mppi/utils/test_helper.h>\n\nTEST(QuadrotorMapCost, checkHeadingCost)\n{\n  GTEST_SKIP_(\"The calculation has fundamentally changed, uses velocity vector and not orientation\");\n  using COST = QuadrotorMapCost;\n  COST cost;\n  COST::output_array curr_state = COST::output_array::Zero();\n  Eigen::Quaternionf temp_quat;\n  float deg2rad = M_PI / 180;\n  // Have velocity in the y direction\n  curr_state[4] = 1;\n  // Get quaternion for yaw of 30 degrees\n  mppi::math::Euler2QuatNWU(0, 0, 30.0 * deg2rad, temp_quat);\n  temp_quat.normalize();\n  curr_state[6] = temp_quat.w();\n  curr_state[7] = temp_quat.x();\n  curr_state[8] = temp_quat.y();\n  curr_state[9] = temp_quat.z();\n  // Current state has yaw of 30 degrees\n  auto params = cost.getParams();\n  params.attitude_coeff = 0;\n  params.heading_coeff = 10;\n  params.curr_waypoint = make_float4(0, 1, 0, 0);\n  cost.setParams(params);\n\n  float expected_cost = params.heading_coeff * powf(M_PI_2 - 30 * deg2rad, 2);\n  float calculated_cost = cost.computeHeadingCost(curr_state.data());\n  EXPECT_FLOAT_EQ(expected_cost, calculated_cost);\n}\n\nTEST(QuadrotorMapCost, checkSpeedCost)\n{\n  using COST = QuadrotorMapCost;\n  COST cost;\n  COST::output_array curr_state = COST::output_array::Zero();\n  Eigen::Quaternionf temp_quat;\n  // Get quaternion for yaw of 30 degrees\n  curr_state[3] = 3;\n  curr_state[4] = 4;\n  // Current state has yaw of 30 degrees\n  auto params = cost.getParams();\n  params.speed_coeff = 10;\n  params.desired_speed = 10;\n  cost.setParams(params);\n\n  float expected_cost = params.speed_coeff * powf(10 - 5, 2);\n  float calculated_cost = cost.computeSpeedCost(curr_state.data());\n  EXPECT_FLOAT_EQ(expected_cost, calculated_cost);\n}\n"
  },
  {
    "path": "tests/cost_functions/quadrotor_quadratic_cost_test.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/cost_functions/quadrotor/quadrotor_quadratic_cost.cuh>\n#include <mppi/utils/test_helper.h>\n\n#include <array>\n\nTEST(QuadrotorQuadraticCost, Constructor)\n{\n  QuadrotorQuadraticCost();\n}\n\ntemplate <class COST_T>\nvoid __global__ StateCostKernel(COST_T* costs, float* s_d, float* result)\n{\n  result[0] = costs->computeStateCost(s_d);\n  result[1] = costs->terminalCost(s_d);\n}\n\nTEST(QuadrotorQuadraticCost, ControlCost)\n{\n  using COST = QuadrotorQuadraticCost;\n  const int num_results = 2;\n\n  /**\n   * CPU Setup\n   */\n  COST cost;\n  COST::output_array s = COST::output_array::Random();\n  Eigen::Quaternionf q_test(s[6], s[7], s[8], s[9]);\n  q_test.normalize();\n  s[6] = q_test.w();\n  s[7] = q_test.x();\n  s[8] = q_test.y();\n  s[9] = q_test.z();\n\n  // COST::control_array std_dev = COST::control_array::Constant(0.5);\n  // float lambda = 0.8;\n  QuadrotorQuadraticCostParams new_params;\n  new_params.x_coeff = 5;\n  cost.setParams(new_params);\n  /**\n   * GPU Setup\n   */\n  cost.GPUSetup();\n  cudaStream_t stream_t;\n  cudaStreamCreate(&stream_t);\n  float* s_d;\n  // float* eps_d;\n  // float* std_dev_d;\n  float* GPU_result_d;\n  // Allocate Memory on GPU\n  size_t state_size = sizeof(float) * COST::OUTPUT_DIM;\n  HANDLE_ERROR(cudaMalloc((void**)&s_d, state_size));\n  // HANDLE_ERROR(cudaMalloc((void**)&eps_d, state_size));\n  // HANDLE_ERROR(cudaMalloc((void**)&std_dev_d, state_size));\n  HANDLE_ERROR(cudaMalloc((void**)&GPU_result_d, sizeof(float) * num_results));\n\n  // Copy data to GPU\n  HANDLE_ERROR(cudaMemcpyAsync(s_d, s.data(), state_size, cudaMemcpyHostToDevice, stream_t));\n  // HANDLE_ERROR(cudaMemcpyAsync(eps_d, eps.data(), state_size,\n  //                              cudaMemcpyHostToDevice, stream_t));\n  // HANDLE_ERROR(cudaMemcpyAsync(std_dev_d, std_dev.data(), state_size,\n  //                              cudaMemcpyHostToDevice, stream_t));\n  
HANDLE_ERROR(cudaStreamSynchronize(stream_t));\n\n  std::array<float, num_results> CPU_result;\n  CPU_result[0] = cost.computeStateCost(s);\n  CPU_result[1] = cost.terminalCost(s);\n\n  // call GPU Kernel\n  std::array<float, num_results> GPU_result;\n  dim3 dimBlock(1, 1, 1);\n  dim3 dimGrid(1, 1, 1);\n  StateCostKernel<COST><<<dimGrid, dimBlock, 0, stream_t>>>(cost.cost_d_, s_d, GPU_result_d);\n  CudaCheckError();\n  HANDLE_ERROR(cudaStreamSynchronize(stream_t));\n  // Copy GPU results back to Host\n  HANDLE_ERROR(\n      cudaMemcpyAsync(GPU_result.data(), GPU_result_d, sizeof(float) * num_results, cudaMemcpyDeviceToHost, stream_t));\n  HANDLE_ERROR(cudaStreamSynchronize(stream_t));\n  std::cout << \"State Cost: \" << CPU_result[0] << std::endl;\n  array_assert_float_eq<num_results>(CPU_result, GPU_result);\n}\n"
  },
  {
    "path": "tests/dynamics/CMakeLists.txt",
    "content": "set(GTEST_LIBRARIES gtest gmock gtest_main)\nfile(GLOB TARGET_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cu)\n\nforeach(T_FILE IN LISTS TARGET_SRCS)\n  get_filename_component(T_NAME ${T_FILE} NAME_WE)\n  add_executable(${T_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp ${T_FILE})\n  target_link_libraries(${T_NAME}\n                        ${GTEST_LIBRARIES}\n                        ${MPPI_HEADER_LIBRARY_NAME})\n  gtest_discover_tests(${T_NAME})\n  set_target_properties(${T_NAME} PROPERTIES FOLDER test)\nendforeach()\n\n## Autorally Dynamics Constant Memory Tests\n#add_executable(ar_dynamics_nn_test_constant ../test_main.cpp ar_dynamics_nn_test.cu)\n#target_compile_definitions(ar_dynamics_nn_test_constant PRIVATE MPPI_NNET_USING_CONSTANT_MEM__)\n#target_link_libraries(ar_dynamics_nn_test_constant\n#                      ${GTEST_LIBRARIES}\n#                      cnpy)\n#\n#gtest_add_tests(TARGET ar_dynamics_nn_test_constant)\n#\n#set_target_properties(ar_dynamics_nn_test_constant PROPERTIES FOLDER test)\n"
  },
  {
    "path": "tests/dynamics/angle_utils_test.cu",
    "content": "#include <gtest/gtest.h>\n#include <cuda_runtime.h>\n#include <mppi/utils/angle_utils.cuh>\n#include <random>\n\nTEST(AngleUtils, normalizeAngleKnownDouble)\n{\n  double angle = 0.5;\n  EXPECT_FLOAT_EQ(angle_utils::normalizeAngle(angle), 0.5);\n\n  angle = M_PI_2;\n  EXPECT_FLOAT_EQ(angle_utils::normalizeAngle(angle), M_PI_2);\n\n  angle = M_PI;\n  EXPECT_FLOAT_EQ(angle_utils::normalizeAngle(angle), M_PI);\n\n  angle = -M_PI;\n  EXPECT_FLOAT_EQ(angle_utils::normalizeAngle(angle), M_PI);\n\n  angle = -M_PI * 3;\n  EXPECT_FLOAT_EQ(angle_utils::normalizeAngle(angle), M_PI);\n\n  angle = M_PI * 8;\n  EXPECT_FLOAT_EQ(angle_utils::normalizeAngle(angle), 0);\n}\n\nTEST(AngleUtils, normalizeAngleKnownFloat)\n{\n  float angle = 0.5f;\n  EXPECT_FLOAT_EQ(angle_utils::normalizeAngle(angle), 0.5f);\n\n  angle = static_cast<float>(M_PI_2);\n  EXPECT_FLOAT_EQ(angle_utils::normalizeAngle(angle), static_cast<float>(M_PI_2));\n\n  angle = static_cast<float>(M_PI);\n  EXPECT_FLOAT_EQ(angle_utils::normalizeAngle(angle), static_cast<float>(M_PI));\n\n  angle = -static_cast<float>(M_PI);\n  EXPECT_FLOAT_EQ(angle_utils::normalizeAngle(angle), static_cast<float>(M_PI));\n\n  angle = -static_cast<float>(M_PI) * 3.0f + 1e-3f;\n  EXPECT_FLOAT_EQ(angle_utils::normalizeAngle(angle), -static_cast<float>(M_PI) + 1e-3f);\n\n  angle = static_cast<float>(M_PI) * 8.0f;\n  EXPECT_NEAR(angle_utils::normalizeAngle(angle), 0.0f, 1e-6);\n}\n\nTEST(AngleUtils, normalizeAngleRandom)\n{\n  std::random_device rd;\n  std::mt19937 e2(rd());\n  std::uniform_real_distribution<> dist(-100, 100);\n\n  for (int i = 0; i < 1000; i++)\n  {\n    double rand = dist(e2);\n    double result = angle_utils::normalizeAngle(rand);\n    EXPECT_TRUE(result <= M_PI && result > -M_PI) << result;\n  }\n\n  for (int i = 0; i < 1000; i++)\n  {\n    float rand = dist(e2);\n    float result = angle_utils::normalizeAngle(rand);\n    EXPECT_TRUE(result <= M_PI && result > -M_PI) << result;\n  }\n}\n\nTEST(AngleUtils, 
shortestAngularDistanceDouble)\n{\n  double angle_1 = 0;\n  double angle_2 = 0;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), 0);\n\n  angle_1 = 0;\n  angle_2 = M_PI_2;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), M_PI_2);\n\n  angle_1 = 0;\n  angle_2 = -M_PI;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), M_PI);\n\n  angle_1 = M_PI_4;\n  angle_2 = -M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), -M_PI_2);\n\n  angle_1 = -M_PI_4;\n  angle_2 = M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), M_PI_2);\n\n  angle_1 = 3 * M_PI_4;\n  angle_2 = -3 * M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), M_PI_2);\n\n  angle_1 = -3 * M_PI_4;\n  angle_2 = 3 * M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), -M_PI_2);\n}\n\nTEST(AngleUtils, shortestAngularDistanceFloat)\n{\n  float angle_1 = 0;\n  float angle_2 = 0;\n  EXPECT_NEAR(angle_utils::shortestAngularDistance(angle_1, angle_2), 0, 1e-6);\n\n  angle_1 = 0;\n  angle_2 = M_PI_2;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), M_PI_2);\n\n  // TODO is right?\n  angle_1 = 0;\n  angle_2 = -M_PI;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), M_PI);\n\n  angle_1 = M_PI_4;\n  angle_2 = -M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), -M_PI_2);\n\n  angle_1 = -M_PI_4;\n  angle_2 = M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), M_PI_2);\n\n  angle_1 = 3 * M_PI_4;\n  angle_2 = -3 * M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), M_PI_2);\n\n  angle_1 = -3 * M_PI_4;\n  angle_2 = 3 * M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::shortestAngularDistance(angle_1, angle_2), -M_PI_2);\n}\n\nTEST(AngleUtils, interpolateEulerAngleLinearDouble)\n{\n  double angle_1 = 
0;\n  double angle_2 = 0;\n  double alpha = 0.5;\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, alpha), 0);\n\n  angle_1 = 0;\n  angle_2 = M_PI_2;\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.0), 0);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.25), M_PI_4 / 2);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.5), M_PI_4);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.75), M_PI_4 * 3 / 2);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 1.0), M_PI_2);\n\n  angle_1 = M_PI_4;\n  angle_2 = -M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.0), M_PI_4);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.25), M_PI_4 / 2);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.5), 0);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.75), -M_PI_4 / 2);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 1.0), -M_PI_4);\n\n  angle_1 = 3 * M_PI_4;\n  angle_2 = -3 * M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.0), 3 * M_PI_4);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.25), 7 * M_PI_4 / 2);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.5), M_PI);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.75), -7 * M_PI_4 / 2);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 1.0), -3 * M_PI_4);\n}\n\nTEST(AngleUtils, interpolateEulerAngleLinearFloat)\n{\n  float angle_1 = 0;\n  float angle_2 = 0;\n  float alpha = 0.5;\n  EXPECT_NEAR(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, alpha), 0, 1e-7);\n\n  angle_1 = 0;\n  angle_2 = M_PI_2;\n  
EXPECT_NEAR(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.0f), 0, 1e-7);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.25f), M_PI_4 / 2);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.5f), M_PI_4);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.75f), M_PI_4 * 3 / 2);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 1.0f), M_PI_2);\n\n  angle_1 = M_PI_4;\n  angle_2 = -M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.0f), M_PI_4);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.25f), M_PI_4 / 2);\n  EXPECT_NEAR(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.5f), 0, 1e-7);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.75f), -M_PI_4 / 2);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 1.0f), -M_PI_4);\n\n  angle_1 = 3 * M_PI_4;\n  angle_2 = -3 * M_PI_4;\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.0f), 3 * M_PI_4);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.25f), 7 * M_PI_4 / 2);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.5f), M_PI);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 0.75f), -7 * M_PI_4 / 2);\n  EXPECT_FLOAT_EQ(angle_utils::interpolateEulerAngleLinear(angle_1, angle_2, 1.0f), -3 * M_PI_4);\n}\n"
  },
  {
    "path": "tests/dynamics/ar_dynamics_nn_test.cu",
    "content": "//\n// Created by jgibson37 on 1/13/20.\n//\n\n#include <gtest/gtest.h>\n#include <mppi/dynamics/autorally/ar_nn_model.cuh>\n#include <kernel_tests/dynamics/autorally/ar_nn_dynamics_kernel_test.cuh>\n#include <stdio.h>\n#include <math.h>\n\n// Auto-generated header file\n#include <autorally_test_network.h>\n#include \"mppi/ddp/ddp_model_wrapper.h\"\n\n/**\n * Note: the analytical solution for the test NN is outlined in the python script\n */\n\nTEST(ARNeuralNetDynamics, verifyTemplateParamters)\n{\n  int state_dim = NeuralNetModel<7, 2, 3>::STATE_DIM;\n  EXPECT_EQ(state_dim, 7);\n\n  int control_dim = NeuralNetModel<7, 2, 3>::CONTROL_DIM;\n  EXPECT_EQ(control_dim, 2);\n\n  int dynamics_dim = NeuralNetModel<7, 2, 3>::DYNAMICS_DIM;\n  EXPECT_EQ(dynamics_dim, 7 - 3);\n\n  NeuralNetModel<7, 2, 3> model;\n  int* net_structure = model.getHelperPtr()->getNetStructurePtr();\n\n  EXPECT_EQ(net_structure[0], 6);\n  EXPECT_EQ(net_structure[1], 32);\n  EXPECT_EQ(net_structure[2], 32);\n  EXPECT_EQ(net_structure[3], 4);\n\n  EXPECT_EQ(model.getHelperPtr()->getLargestLayer(), 32);\n  EXPECT_EQ(model.getHelperPtr()->getNumParams(), (6 + 1) * 32 + (32 + 1) * 32 + (32 + 1) * 4);\n  EXPECT_EQ(model.getBlkSharedSizeBytes(), 256);\n  EXPECT_EQ(model.getGrdSharedSizeBytes(), 5696);\n  EXPECT_EQ(model.getHelperPtr()->getBlkSharedSizeBytes(), 256);\n  EXPECT_EQ(model.getHelperPtr()->getGrdSharedSizeBytes(), 5696);\n}\n\nTEST(ARNeuralNetDynamics, BindStreamControlRanges)\n{\n  cudaStream_t stream;\n\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  std::array<float2, 2> u_constraint = {};\n  u_constraint[0].x = -1.0;\n  u_constraint[0].y = 1.0;\n\n  u_constraint[1].x = -2.0;\n  u_constraint[1].y = 2.0;\n  NeuralNetModel<7, 2, 3> model(u_constraint, stream);\n\n  EXPECT_EQ(model.stream_, stream) << \"Stream binding failure.\";\n\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n}\n\nTEST(ARNeuralNetDynamics, BindStreamDefaultArgRanges)\n{\n  cudaStream_t stream;\n\n  
HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  NeuralNetModel<7, 2, 3> model(stream);\n\n  EXPECT_EQ(model.stream_, stream) << \"Stream binding failure.\";\n\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n}\n\nTEST(ARNeuralNetDynamics, ControlRangesSetDefaultCPU)\n{\n  NeuralNetModel<7, 2, 3> model;\n\n  std::array<float2, 2> ranges = model.getControlRanges();\n  for (int i = 0; i < 2; i++)\n  {\n    EXPECT_FLOAT_EQ(ranges[i].x, -FLT_MAX);\n    EXPECT_FLOAT_EQ(ranges[i].y, FLT_MAX);\n  }\n}\n\nTEST(ARNeuralNetDynamics, ControlRangesSetCPU)\n{\n  std::array<float2, 2> u_constraint = {};\n  u_constraint[0].x = -1.0;\n  u_constraint[0].y = 1.0;\n\n  u_constraint[1].x = -2.0;\n  u_constraint[1].y = 2.0;\n  NeuralNetModel<7, 2, 3> model(u_constraint);\n\n  std::array<float2, 2> ranges = model.getControlRanges();\n  EXPECT_FLOAT_EQ(ranges[0].x, -1.0);\n  EXPECT_FLOAT_EQ(ranges[0].y, 1.0);\n\n  EXPECT_FLOAT_EQ(ranges[1].x, -2.0);\n  EXPECT_FLOAT_EQ(ranges[1].y, 2.0);\n}\n\nTEST(ARNeuralNetDynamics, strideIdcsSetDefault)\n{\n  NeuralNetModel<7, 2, 3> model;\n\n  int* result = model.getHelperPtr()->getStrideIdcsPtr();\n\n  EXPECT_EQ(result[0], 0);\n  EXPECT_EQ(result[1], 192);\n  EXPECT_EQ(result[2], 224);\n  EXPECT_EQ(result[3], 1248);\n  EXPECT_EQ(result[4], 1280);\n  EXPECT_EQ(result[5], 1408);\n}\n\nTEST(ARNeuralNetDynamics, GPUSetupAndParamsCheck)\n{\n  NeuralNetModel<7, 2, 3> model;\n\n  float* theta = model.getHelperPtr()->getThetaPtr();\n  int* stride = model.getHelperPtr()->getStrideIdcsPtr();\n  int* net_structure = model.getHelperPtr()->getNetStructurePtr();\n\n  std::array<float, 1412> theta_result = {};\n  std::array<int, 6> stride_result = {};\n  std::array<int, 4> net_structure_result = {};\n\n  EXPECT_EQ(model.GPUMemStatus_, false);\n  EXPECT_EQ(model.model_d_, nullptr);\n  EXPECT_NE(model.getHelperPtr(), nullptr);\n\n  model.GPUSetup();\n\n  EXPECT_EQ(model.GPUMemStatus_, true);\n  EXPECT_NE(model.model_d_, nullptr);\n\n  // launch kernel\n  
launchParameterCheckTestKernel<NeuralNetModel<7, 2, 3>, 1412, 6, 4>(model, theta_result, stride_result,\n                                                                      net_structure_result);\n\n  for (int i = 0; i < 1412; i++)\n  {\n    // these are a bunch of mostly random values and nan != nan\n    if (!isnan(theta[i]))\n    {\n      EXPECT_FLOAT_EQ(theta_result[i], theta[i]);\n    }\n  }\n  for (int i = 0; i < 6; i++)\n  {\n    EXPECT_EQ(stride_result[i], stride[i]);\n  }\n\n  for (int i = 0; i < 4; i++)\n  {\n    EXPECT_EQ(net_structure[i], net_structure_result[i]);\n  }\n}\n\nTEST(ARNeuralNetDynamics, UpdateModelTest)\n{\n  NeuralNetModel<7, 2, 3> model;\n\n  float* theta = model.getHelperPtr()->getThetaPtr();\n  int* stride = model.getHelperPtr()->getStrideIdcsPtr();\n  int* net_structure = model.getHelperPtr()->getNetStructurePtr();\n\n  std::array<float, 1412> theta_result = {};\n  std::array<int, 6> stride_result = {};\n  std::array<int, 4> net_structure_result = {};\n\n  model.GPUSetup();\n\n  std::vector<float> theta_vec(1412);\n  srand(time(NULL));\n  for (int i = 0; i < 1412; i++)\n  {\n    theta_vec[i] = rand();\n  }\n\n  model.updateModel({ 6, 32, 32, 4 }, theta_vec);\n\n  // check CPU\n  for (int i = 0; i < 1412; i++)\n  {\n    // these are a bunch of mostly random values and nan != nan\n    if (!isnan(theta_vec[i]))\n    {\n      EXPECT_FLOAT_EQ(model.getHelperPtr()->getThetaPtr()[i], theta_vec[i]);\n    }\n  }\n\n  // launch kernel\n  launchParameterCheckTestKernel<NeuralNetModel<7, 2, 3>, 1412, 6, 4>(model, theta_result, stride_result,\n                                                                      net_structure_result);\n\n  for (int i = 0; i < 1412; i++)\n  {\n    // these are a bunch of mostly random values and nan != nan\n    if (!isnan(theta_vec[i]))\n    {\n      EXPECT_FLOAT_EQ(theta_result[i], theta_vec[i]) << \"failed at index \" << i;\n    }\n  }\n  for (int i = 0; i < 6; i++)\n  {\n    EXPECT_EQ(stride_result[i], 
stride[i]);\n  }\n\n  for (int i = 0; i < 4; i++)\n  {\n    EXPECT_EQ(net_structure[i], net_structure_result[i]);\n  }\n}\n\nTEST(ARNeuralNetDynamics, LoadModelTest)\n{\n  NeuralNetModel<7, 2, 3> model;\n  EXPECT_EQ(model.getGrdSharedSizeBytes(), model.getHelperPtr()->getGrdSharedSizeBytes()) << \"Shared mem request \"\n                                                                                             \"doesn't match between \"\n                                                                                             \"Dynamics and Neural \"\n                                                                                             \"Network\";\n  EXPECT_EQ(model.getBlkSharedSizeBytes(), model.getHelperPtr()->getBlkSharedSizeBytes()) << \"Shared mem request \"\n                                                                                             \"doesn't match between \"\n                                                                                             \"Dynamics and Neural \"\n                                                                                             \"Network\";\n  model.GPUSetup();\n\n  // TODO procedurally generate a NN in python and save and run like costs\n  std::string path = mppi::tests::test_load_nn_file;\n  model.loadParams(path);\n\n  // check CPU\n  for (int i = 0; i < 1412; i++)\n  {\n    EXPECT_FLOAT_EQ(model.getHelperPtr()->getThetaPtr()[i], i) << \"failed at index \" << i;\n  }\n\n  std::array<float, 1412> theta_result = {};\n  std::array<int, 6> stride_result = {};\n  std::array<int, 4> net_structure_result = {};\n\n  EXPECT_EQ(model.getGrdSharedSizeBytes(), model.getHelperPtr()->getGrdSharedSizeBytes()) << \"Shared mem request \"\n                                                                                             \"doesn't match between \"\n                                                                                             \"Dynamics and Neural \"\n                               
                                                              \"Network\";\n  EXPECT_EQ(model.getBlkSharedSizeBytes(), model.getHelperPtr()->getBlkSharedSizeBytes()) << \"Shared mem request \"\n                                                                                             \"doesn't match between \"\n                                                                                             \"Dynamics and Neural \"\n                                                                                             \"Network\";\n\n  // launch kernel\n  launchParameterCheckTestKernel<NeuralNetModel<7, 2, 3>, 1412, 6, 4>(model, theta_result, stride_result,\n                                                                      net_structure_result);\n\n  for (int i = 0; i < 1412; i++)\n  {\n    EXPECT_FLOAT_EQ(theta_result[i], i) << \"failed at index \" << i;\n  }\n}\n\nTEST(ARNeuralNetDynamics, computeKinematicsTestCPU)\n{\n  NeuralNetModel<7, 2, 3> model;\n\n  NeuralNetModel<7, 2, 3>::state_array s(7, 1);\n  NeuralNetModel<7, 2, 3>::state_array s_der(7, 1);\n\n  s(2) = 0.0;  // yaw\n  s(4) = 1.0;  // body frame vx\n  s(5) = 2.0;  // body frame vy\n  s(6) = 0.0;  // yaw dot\n\n  model.computeKinematics(s, s_der);\n\n  EXPECT_FLOAT_EQ(s_der(0), 1.0);\n  EXPECT_FLOAT_EQ(s_der(1), 2.0);\n  EXPECT_FLOAT_EQ(s_der(2), 0.0);\n\n  s(2) = M_PI / 2;  // yaw\n  s(4) = 3.0;       // body frame vx\n  s(5) = 5.0;       // body frame vy\n  s(6) = 1.0;       // yaw dot\n\n  model.computeKinematics(s, s_der);\n\n  EXPECT_FLOAT_EQ(s_der(0), -5);\n  EXPECT_FLOAT_EQ(s_der(1), 3.0);\n  EXPECT_FLOAT_EQ(s_der(2), -1.0);\n}\n\nTEST(ARNeuralNetDynamics, computeKinematicsTestGPU)\n{\n  NeuralNetModel<7, 2, 3> model;\n\n  std::vector<std::array<float, 7>> s(1);\n  std::vector<std::array<float, 7>> s_der(1);\n\n  model.GPUSetup();\n\n  for (int y_dim = 1; y_dim < 17; y_dim++)\n  {\n    s[0] = { 0.0 };\n    s[0][2] = 0.0;  // yaw\n    s[0][4] = 1.0;  // body frame vx\n    s[0][5] = 2.0;  
// body frame vy\n    s[0][6] = 0.0;  // yaw dot\n\n    s_der[0] = { 0.0 };\n\n    launchComputeKinematicsTestKernel<NeuralNetModel<7, 2, 3>, 7>(model, s, s_der, y_dim);\n\n    EXPECT_FLOAT_EQ(s_der[0][0], 1.0);\n    EXPECT_FLOAT_EQ(s_der[0][1], 2.0);\n    EXPECT_FLOAT_EQ(s_der[0][2], 0.0);\n\n    s[0][2] = M_PI / 2;  // yaw\n    s[0][4] = 3.0;       // body frame vx\n    s[0][5] = 5.0;       // body frame vy\n    s[0][6] = 1.0;       // yaw dot\n\n    launchComputeKinematicsTestKernel<NeuralNetModel<7, 2, 3>, 7>(model, s, s_der, y_dim);\n\n    EXPECT_FLOAT_EQ(s_der[0][0], -5);\n    EXPECT_FLOAT_EQ(s_der[0][1], 3.0);\n    EXPECT_FLOAT_EQ(s_der[0][2], -1.0);\n  }\n}\n\nTEST(ARNeuralNetDynamics, updateStateGPUTest)\n{\n  NeuralNetModel<7, 2, 3> model;\n\n  std::vector<std::array<float, 7>> s(1);\n  // x_dot, y_dot, theta_dot\n  std::vector<std::array<float, 7>> s_der(1);\n\n  model.GPUSetup();\n\n  for (int j = 1; j < 17; j++)\n  {\n    s[0] = { 0.0 };\n    s[0][2] = 0.0;  // yaw\n    s[0][4] = 1.0;  // body frame vx\n    s[0][5] = 2.0;  // body frame vy\n    s[0][6] = 0.0;  // yaw dot\n\n    s_der[0] = { 0.0 };\n    s_der[0][0] = 1.0;\n    s_der[0][1] = 2.0;\n    s_der[0][2] = 3.0;\n\n    launchUpdateStateTestKernel<NeuralNetModel<7, 2, 3>, 7>(model, s, s_der, 0.1, j);\n\n    EXPECT_FLOAT_EQ(s_der[0][0], 1);\n    EXPECT_FLOAT_EQ(s_der[0][1], 2);\n    EXPECT_FLOAT_EQ(s_der[0][2], 3);\n    EXPECT_FLOAT_EQ(s_der[0][3], 0);\n    EXPECT_FLOAT_EQ(s_der[0][4], 0);\n    EXPECT_FLOAT_EQ(s_der[0][5], 0);\n    EXPECT_FLOAT_EQ(s_der[0][6], 0);\n\n    EXPECT_FLOAT_EQ(s[0][0], 0.1);\n    EXPECT_FLOAT_EQ(s[0][1], 0.2);\n    EXPECT_FLOAT_EQ(s[0][2], 0.3);\n    EXPECT_FLOAT_EQ(s[0][3], 0.0);\n    EXPECT_FLOAT_EQ(s[0][4], 1.0);\n    EXPECT_FLOAT_EQ(s[0][5], 2.0);\n    EXPECT_FLOAT_EQ(s[0][6], 0.0);\n  }\n}\n\n/**\n *\n * @tparam CLASS_T\n * @param model\n * @param s\n * @param ds\n * @param u\n * @param du\n */\ntemplate <class CLASS_T>\nvoid compareFiniteDifferenceGradient(CLASS_T& 
model, Eigen::MatrixXf& s, Eigen::MatrixXf& ds, Eigen::MatrixXf& u,\n                                     Eigen::MatrixXf& du)\n{\n  Eigen::MatrixXf s_2(7, 1);\n  s_2 = s + ds;\n  Eigen::MatrixXf u_2(2, 1);\n  u_2 = u + du;\n  Eigen::MatrixXf s_der(7, 1);\n  Eigen::MatrixXf s_der_2(7, 1);\n  s_der.setZero();\n  s_der_2.setZero();\n\n  Eigen::MatrixXf calculated_A(7, 7);\n  Eigen::MatrixXf calculated_B(7, 2);\n\n  model.computeDynamics(s_2, u_2, s_der_2);\n  model.computeDynamics(s, u, s_der);\n  std::cout << \"s_der\\n\" << s_der << std::endl;\n  std::cout << \"s_der_2\\n\" << s_der_2 << std::endl;\n  std::cout << \"s_der_2 - s_der\\n\" << (s_der_2 - s_der) << std::endl;\n\n  Eigen::MatrixXf A(7, 7);\n  Eigen::MatrixXf B(7, 2);\n\n  model.computeGrad(s, u, A, B);\n  std::cout << \"A = \\n\" << A << std::endl;\n  std::cout << \"B = \\n\" << B << std::endl;\n\n  // compare A\n  for (int i = 0; i < 7; i++)\n  {\n    for (int j = 0; j < 7; j++)\n    {\n      EXPECT_NEAR(calculated_A(i, j), A(i, j), 0.01) << \"failed at index = \" << i << \", \" << j;\n    }\n  }\n\n  // compare B\n  for (int i = 0; i < 7; i++)\n  {\n    for (int j = 0; j < 2; j++)\n    {\n      EXPECT_NEAR(calculated_B(i, j), B(i, j), 0.01) << \"failed at index = \" << i << \", \" << j;\n    }\n  }\n}\n\n/*\n// Note math for analytical solution is in the python script\nTEST(ARNeuralNetDynamics, computeGrad) {\n  GTEST_SKIP();\n  NeuralNetModel<7,2,3,6,32,32,4> model;\n\n  Eigen::MatrixXf s(7, 1);\n  Eigen::MatrixXf ds(7, 1);\n  Eigen::MatrixXf u(2, 1);\n  Eigen::MatrixXf du(2, 1);\n  s.setZero();\n  ds.setZero();\n  u.setZero();\n  du.setZero();\n  ds << 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0;\n\n  std::vector<float> theta(1412);\n\n  std::fill(theta.begin(), theta.end(), 1);\n  model.updateModel({6, 32, 32, 4}, theta);\n\n  compareFiniteDifferenceGradient(model, s, ds, u, du);\n\n}\n */\n\nTEST(ARNeuralNetDynamics, computeDynamicsCPU)\n{\n  NeuralNetModel<7, 2, 3> model;\n\n  NeuralNetModel<7, 2, 
3>::state_array s;\n  NeuralNetModel<7, 2, 3>::control_array u;\n  NeuralNetModel<7, 2, 3>::state_array s_der;\n  s.setZero();\n  s_der.setZero();\n  u << 1, -1;\n\n  std::vector<float> theta(1412);\n\n  std::fill(theta.begin(), theta.end(), 1);\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  model.computeDynamics(s, u, s_der);\n\n  EXPECT_FLOAT_EQ(s(0), 0);\n  EXPECT_FLOAT_EQ(s(1), 0);\n  EXPECT_FLOAT_EQ(s(2), 0);\n  EXPECT_FLOAT_EQ(s(3), 0);\n  EXPECT_FLOAT_EQ(s(4), 0);\n  EXPECT_FLOAT_EQ(s(5), 0);\n  EXPECT_FLOAT_EQ(s(6), 0);\n\n  EXPECT_FLOAT_EQ(s_der(0), 0);\n  EXPECT_FLOAT_EQ(s_der(1), 0);\n  EXPECT_FLOAT_EQ(s_der(2), 0);\n  EXPECT_FLOAT_EQ(s_der(3), 33);\n  EXPECT_FLOAT_EQ(s_der(4), 33);\n  EXPECT_FLOAT_EQ(s_der(5), 33);\n  EXPECT_FLOAT_EQ(s_der(6), 33);\n\n  EXPECT_FLOAT_EQ(u(0), 1.0);\n  EXPECT_FLOAT_EQ(u(1), -1.0);\n}\n\nTEST(ARNeuralNetDynamics, computeDynamicsGPU)\n{\n  NeuralNetModel<7, 2, 3> model;\n\n  std::vector<std::array<float, 7>> s(1);\n  // x_dot, y_dot, theta_dot\n  std::vector<std::array<float, 7>> s_der(1);\n  // steering, throttle\n  std::vector<std::array<float, 2>> u(1);\n\n  std::vector<float> theta(1412);\n  model.GPUSetup();\n\n  std::fill(theta.begin(), theta.end(), 1);\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  for (int y_dim = 1; y_dim < 17; y_dim++)\n  {\n    s[0] = { 0 };\n    s_der[0] = { 0 };\n    u[0] = { 0 };\n    u[0][0] = 1.0;\n    u[0][1] = -1.0;\n\n    launchComputeDynamicsTestKernel<NeuralNetModel<7, 2, 3>, 7, 2>(model, s, u, s_der, y_dim);\n\n    EXPECT_FLOAT_EQ(s[0][0], 0);\n    EXPECT_FLOAT_EQ(s[0][1], 0);\n    EXPECT_FLOAT_EQ(s[0][2], 0);\n    EXPECT_FLOAT_EQ(s[0][3], 0);\n    EXPECT_FLOAT_EQ(s[0][4], 0);\n    EXPECT_FLOAT_EQ(s[0][5], 0);\n    EXPECT_FLOAT_EQ(s[0][6], 0);\n\n    EXPECT_FLOAT_EQ(s_der[0][0], 0);\n    EXPECT_FLOAT_EQ(s_der[0][1], 0);\n    EXPECT_FLOAT_EQ(s_der[0][2], 0);\n    EXPECT_FLOAT_EQ(s_der[0][3], 33) << \"at y_dim \" << y_dim;\n    EXPECT_FLOAT_EQ(s_der[0][4], 33) << \"at y_dim \" 
<< y_dim;\n    EXPECT_FLOAT_EQ(s_der[0][5], 33) << \"at y_dim \" << y_dim;\n    EXPECT_FLOAT_EQ(s_der[0][6], 33) << \"at y_dim \" << y_dim;\n\n    EXPECT_FLOAT_EQ(u[0][0], 1.0);\n    EXPECT_FLOAT_EQ(u[0][1], -1.0);\n  }\n}\n\n// TODO compute state deriv CPU\nTEST(ARNeuralNetDynamics, computeStateDerivCPU)\n{\n}\n\nTEST(ARNeuralNetDynamics, computeStateDerivGPU)\n{\n  NeuralNetModel<7, 2, 3> model;\n  model.GPUSetup();\n\n  std::vector<std::array<float, 7>> s(1);\n  // x_dot, y_dot, theta_dot\n  std::vector<std::array<float, 7>> s_der(1);\n  // steering, throttle\n  std::vector<std::array<float, 2>> u(1);\n\n  std::vector<float> theta(1412);\n  std::fill(theta.begin(), theta.end(), 1);\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  for (int j = 1; j < 17; j++)\n  {\n    s[0] = { 0.0 };\n    s[0][4] = 1;\n    s[0][5] = 2;\n    s[0][6] = 3;\n\n    s_der[0] = { 0.0 };\n    u[0] = { 0.0 };\n\n    launchComputeStateDerivTestKernel<NeuralNetModel<7, 2, 3>, 7, 2>(model, s, u, s_der, j);\n\n    EXPECT_FLOAT_EQ(s[0][0], 0);\n    EXPECT_FLOAT_EQ(s[0][1], 0);\n    EXPECT_FLOAT_EQ(s[0][2], 0);\n    EXPECT_FLOAT_EQ(s[0][3], 0);\n    EXPECT_FLOAT_EQ(s[0][4], 1);\n    EXPECT_FLOAT_EQ(s[0][5], 2);\n    EXPECT_FLOAT_EQ(s[0][6], 3);\n\n    EXPECT_FLOAT_EQ(s_der[0][0], 1);\n    EXPECT_FLOAT_EQ(s_der[0][1], 2);\n    EXPECT_FLOAT_EQ(s_der[0][2], -3);\n    EXPECT_FLOAT_EQ(s_der[0][3], 33);\n    EXPECT_FLOAT_EQ(s_der[0][4], 33);\n    EXPECT_FLOAT_EQ(s_der[0][5], 33);\n    EXPECT_FLOAT_EQ(s_der[0][6], 33);\n\n    EXPECT_FLOAT_EQ(u[0][0], 0);\n    EXPECT_FLOAT_EQ(u[0][1], 0);\n  }\n}\n\n// TODO add in a generic GPU/CPU method call\n\nvoid parseTextIntoDataPointHelper(std::string text, std::array<float, 7>& state, std::array<float, 7>& state_result,\n                                  std::array<float, 7>& state_der, std::array<float, 2>& control)\n{\n  size_t line_pos = 0;\n  size_t prev_line_pos = 1;\n  int what_var = 0;\n  text.append(\" *\");\n  while ((line_pos = text.find(\"*\", 
prev_line_pos)) != std::string::npos)\n  {\n    std::string line = text.substr(prev_line_pos, line_pos - prev_line_pos);\n    line.append(\" \");\n    size_t value_pos = 0;\n    size_t prev_value_pos = 0;\n    int counter = 0;\n    while ((value_pos = line.find(\" \", prev_value_pos)) != std::string::npos)\n    {\n      std::string value = line.substr(prev_value_pos, value_pos - prev_value_pos);\n      // makes sure it is a number\n      if (!value.empty())\n      {\n        float number = 0;  // = std::stoi(value.substr(0, std::string::npos));\n        if (value[0] == '-' && isdigit(value[1]) || isdigit(value[0]))\n        {\n          number = std::stof(value);\n        }\n        else\n        {\n          prev_value_pos = value_pos + 1;\n          continue;\n        }\n        if (what_var == 0)\n        {\n          state[counter++] = number;\n        }\n        else if (what_var == 1)\n        {\n          control[counter++] = number;\n        }\n        else if (what_var == 2)\n        {\n          state_der[counter++] = number;\n        }\n        else if (what_var == 3)\n        {\n          state_result[counter++] = number;\n        }\n      }\n      prev_value_pos = std::min(value_pos + 1, line.length());\n    }\n    what_var++;\n    prev_line_pos = std::min(line_pos + 1, text.length());\n  }\n}\n\nclass DynamicsDummy : public NeuralNetModel<7, 2, 3>\n{\npublic:\n  bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B)\n  {\n    return false;\n  };\n};\n\nTEST(ARNeuralNetDynamics, computeGradTest)\n{\n  std::string path = mppi::tests::old_autorally_network_file;\n\n  Eigen::Matrix<float, NeuralNetModel<7, 2, 3>::STATE_DIM,\n                NeuralNetModel<7, 2, 3>::STATE_DIM + NeuralNetModel<7, 2, 3>::CONTROL_DIM>\n      numeric_jac;\n  Eigen::Matrix<float, NeuralNetModel<7, 2, 3>::STATE_DIM,\n                NeuralNetModel<7, 2, 3>::STATE_DIM 
+ NeuralNetModel<7, 2, 3>::CONTROL_DIM>\n      analytic_jac;\n  NeuralNetModel<7, 2, 3>::state_array state;\n  state << 4.264431, -30.974377, -0.955451, -0.028595, 3.346700, 0.048521, 0.315486;\n  NeuralNetModel<7, 2, 3>::control_array control;\n  control << -0.221381, 0.089168;\n\n  auto analytic_grad_model = NeuralNetModel<7, 2, 3>();\n  analytic_grad_model.loadParams(path);\n\n  NeuralNetModel<7, 2, 3>::dfdx A_analytic = NeuralNetModel<7, 2, 3>::dfdx::Zero();\n  NeuralNetModel<7, 2, 3>::dfdu B_analytic = NeuralNetModel<7, 2, 3>::dfdu::Zero();\n\n  analytic_grad_model.computeGrad(state, control, A_analytic, B_analytic);\n\n  auto numerical_grad_model = DynamicsDummy();\n  numerical_grad_model.loadParams(path);\n\n  std::shared_ptr<ModelWrapperDDP<DynamicsDummy>> ddp_model =\n      std::make_shared<ModelWrapperDDP<DynamicsDummy>>(&numerical_grad_model);\n\n  analytic_jac.leftCols<NeuralNetModel<7, 2, 3>::STATE_DIM>() = A_analytic;\n  analytic_jac.rightCols<NeuralNetModel<7, 2, 3>::CONTROL_DIM>() = B_analytic;\n  numeric_jac = ddp_model->df(state, control);\n\n  ASSERT_LT((numeric_jac - analytic_jac).norm(), 1e-1) << \"Numeric Jacobian\\n\"\n                                                       << numeric_jac << \"\\nAnalytic Jacobian\\n\"\n                                                       << analytic_jac;\n}\n"
  },
  {
    "path": "tests/dynamics/bicycle_slip_parametric_model_test.cu",
    "content": "#include <Eigen/Dense>\n#include <gtest/gtest.h>\n#include <mppi/dynamics/bicycle_slip/bicycle_slip_parametric.cuh>\n#include <kernel_tests/dynamics/dynamics_generic_kernel_tests.cuh>\n#include <mppi/ddp/ddp_model_wrapper.h>\n#include <cuda_runtime.h>\n\nclass BicycleSlipParametricTest : public ::testing::Test\n{\npublic:\n  cudaStream_t stream;\n\n  void SetUp() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n  }\n\n  void TearDown() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamDestroy(stream));\n  }\n};\n\nTEST_F(BicycleSlipParametricTest, Template)\n{\n  auto dynamics = BicycleSlipParametric();\n  EXPECT_EQ(12, BicycleSlipParametric::STATE_DIM);\n  EXPECT_EQ(2, BicycleSlipParametric::CONTROL_DIM);\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n  EXPECT_NE(dynamics.getTextureHelperNormals(), nullptr);\n}\n\nTEST_F(BicycleSlipParametricTest, BindStream)\n{\n  auto dynamics = BicycleSlipParametric(stream);\n\n  EXPECT_EQ(dynamics.stream_, stream) << \"Stream binding failure.\";\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n  EXPECT_EQ(dynamics.getTextureHelper()->stream_, stream);\n  EXPECT_NE(dynamics.getTextureHelperNormals(), nullptr);\n  EXPECT_EQ(dynamics.getTextureHelperNormals()->stream_, stream);\n}\n\n/*\nfloat c_t = 1.3;\nfloat c_b = 2.5;\nfloat c_v = 3.7;\nfloat c_0 = 4.9;\nfloat wheel_base = 0.3;\n */\n\nTEST_F(BicycleSlipParametricTest, ComputeDynamics)\n{\n  BicycleSlipParametric dynamics = BicycleSlipParametric();\n  auto params = dynamics.getParams();\n  BicycleSlipParametric::state_array x = BicycleSlipParametric::state_array::Zero();\n  BicycleSlipParametric::control_array u = BicycleSlipParametric::control_array::Zero();\n\n  // computeDynamics should not touch the roll/pitch element\n  BicycleSlipParametric::state_array x_dot = BicycleSlipParametric::state_array::Ones() * 0.153;\n  dynamics.computeDynamics(x, u, x_dot);\n  
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)) = 2.0f;\n  x(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)) = 1.0f;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 2.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 1.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -0.079 * 2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), -6.5);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  
x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = M_PI_2;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), -1.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 2.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -0.079 * 2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), -6.5);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)) = 0.6f;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), -1.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 2.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0.6);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -0.079 * 2 + 1.0 * 0.6);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), -6.5 - 2 * 0.6 + 0.6 * 1.6);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), -0.6 * 2.2 - 4.2 * 0.6);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = 0.0f;\n  x(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)) = 0.0f;\n  x(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)) = 0.0f;\n  x(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)) = 0.1f;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 2.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0.0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), -0.66);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -0.079 * 2 - 30 * 0.1 * 0.9);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0.0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0.0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)) = 0.5f;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.5f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), -0.66);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -0.079 * 0.5 - 30 * 0.1 * 0.5);\n  
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0.0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0.0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)) = -0.5f;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), -0.5f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), -0.66);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0.079 * 0.5 + 30 * 0.1 * 0.5);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0.0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0.0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)) = -2.0f;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), -2.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), -0.66);\n  
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0.079 * 2 + 30 * 0.1 * 0.9);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0.0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0.0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)) = 2.0f;\n  x(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)) = 0.0f;\n  x(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)) = 1.0f;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 2.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), -0.6);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -0.079 * 2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0.0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), (2 / 0.3) * tanf(1.0f / -9.1f) * 2.2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)) = 2.0f;\n  x(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)) = -0.6;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 2.0f);\n  
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), -0.6);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), -0.6);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -0.079 * 2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), -0.6 * 1.6 + 2 * 0.6);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)),\n                  (0.6 + (2 / 0.3) * tanf(1.0f / -9.1f)) * 2.2 + 4.2 * 0.6);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)) = 2.0f;\n  float yaw_rate = (2 / 0.3) * tanf(1.0f / -9.1f);\n  x(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)) = yaw_rate;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 2.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), yaw_rate);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), -0.6);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -0.079 * 2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), yaw_rate * 1.6 - 2 * yaw_rate);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), -4.2 * yaw_rate);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)) = 2.0f;\n  u(C_IND_CLASS(BicycleSlipParametricParams, THROTTLE_BRAKE)) = 1.0;\n  u(C_IND_CLASS(BicycleSlipParametricParams, STEER_CMD)) = 0.3;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 2.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), yaw_rate);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0.3);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0.0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -0.079 * 2 + 4 * -0.0145f + 2 * 0.15 + 2.73);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), yaw_rate * 1.6 - 2 * yaw_rate);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), -4.2 * yaw_rate);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  u(C_IND_CLASS(BicycleSlipParametricParams, THROTTLE_BRAKE)) = 0.3;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 2.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), yaw_rate);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0.3);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0.0);\n  
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)),\n                  -0.079 * 2 + 0.3 * (4 * -0.0145f + 2 * 0.15 + 2.73));\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), yaw_rate * 1.6 - 2 * yaw_rate);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), -4.2 * yaw_rate);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n}\n\nTEST_F(BicycleSlipParametricTest, ComputeDynamicsWithNormals)\n{\n  BicycleSlipParametric dynamics = BicycleSlipParametric();\n  auto params = dynamics.getParams();\n  BicycleSlipParametric::state_array x = BicycleSlipParametric::state_array::Zero();\n  BicycleSlipParametric::control_array u = BicycleSlipParametric::control_array::Zero();\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  TwoDTextureHelper<float4>* helper = dynamics.getTextureHelperNormals();\n  helper->setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(0.0f, 0.0f, 1.0f, 0.0f);\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, make_float3(1, 2, 3));\n\n  helper->updateTexture(0, data_vec);\n  helper->updateResolution(0, 10);\n  helper->enableTexture(0);\n  helper->copyToDevice(true);\n\n  // computeDynamics should not touch the roll/pitch element\n  BicycleSlipParametric::state_array x_dot = BicycleSlipParametric::state_array::Ones() * 0.153;\n  // dynamics.computeDynamics(x, u, x_dot);\n  // EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0);\n  // 
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0);\n  // EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  // EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  // EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  // EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0);\n  // EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0);\n  // EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  // EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  // EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  // EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(0.4f, 0.0f, 1.0 - sqrt(0.4f * 0.4f), 0.0f);\n  }\n  helper->updateTexture(0, data_vec);\n  helper->copyToDevice(true);\n\n  x = BicycleSlipParametric::state_array::Zero();\n  x_dot = BicycleSlipParametric::state_array::Ones() * 0.153;\n  u(C_IND_CLASS(BicycleSlipParametricParams, THROTTLE_BRAKE)) = 5.0f;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0.4 * 3.9 + 4.02);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n  u(C_IND_CLASS(BicycleSlipParametricParams, THROTTLE_BRAKE)) = 0.0f;\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = M_PI_2;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_NEAR(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0, 1.0e-7);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0.4 * 7.2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = -M_PI_2;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_NEAR(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0, 1.0e-7);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, 
VEL_Y)), -0.4 * 7.2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = M_PI_4;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), sqrtf(2) / 2 * 0.4 * 3.9);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), sqrtf(2) / 2 * 0.4 * 7.2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = -M_PI_4;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 
sqrtf(2) / 2 * 0.4 * 3.9);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), -sqrtf(2) / 2 * 0.4 * 7.2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(-0.4f, 0.0f, 1.0 - sqrt(0.4f * 0.4f), 0.0f);\n  }\n  helper->updateTexture(0, data_vec);\n  helper->copyToDevice(true);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = 0;\n  u(C_IND_CLASS(BicycleSlipParametricParams, THROTTLE_BRAKE)) = 5.0f;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -0.4 * 3.9 + 4.02);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n  u(C_IND_CLASS(BicycleSlipParametricParams, THROTTLE_BRAKE)) = 0.0f;\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = M_PI_2;\n  dynamics.computeDynamics(x, u, x_dot);\n  
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_NEAR(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0, 1.0e-7);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), -0.4 * 7.2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = -M_PI_2;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_NEAR(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0, 1.0e-7);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0.4 * 7.2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = M_PI_4;\n  
dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -sqrtf(2) / 2 * 0.4 * 3.9);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), -sqrtf(2) / 2 * 0.4 * 7.2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = -M_PI_4;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -sqrtf(2) / 2 * 0.4 * 3.9);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), sqrtf(2) / 2 * 0.4 * 7.2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(0.2f, 0.1f, 1.0 - sqrt(0.2f * 0.2f + 0.1f * 0.1f), 0.0f);\n  }\n  helper->updateTexture(0, data_vec);\n  helper->copyToDevice(true);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = 0;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0.2 * 3.9);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0.1 * 7.2);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);\n\n  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = M_PI_2;\n  float sq2_2 = sqrtf(2) / 2;\n  dynamics.computeDynamics(x, u, x_dot);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);\n  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), -0.1 * 3.9);\n  
EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0.2 * 7.2);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);

  // With yaw = -pi/2 the expected body-frame velocity derivatives flip sign
  // relative to the yaw = +pi/2 case above (see the VEL_X / VEL_Y expectations).
  x(S_IND_CLASS(BicycleSlipParametricParams, YAW)) = -M_PI_2;
  dynamics.computeDynamics(x, u, x_dot);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.0f);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.0f);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0.1 * 3.9);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), -0.2 * 7.2);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);
  EXPECT_FLOAT_EQ(x_dot(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 0);
}

// Checks that the GPU implementation of computeDynamics matches the CPU
// implementation on 100 random state/control pairs, for several thread-block
// y-dimensions (1 through 4).
TEST_F(BicycleSlipParametricTest, TestModelGPU)
{
  BicycleSlipParametric dynamics = BicycleSlipParametric();
  dynamics.GPUSetup();
  CudaCheckError();

  // Random trajectories used as identical inputs on both CPU and GPU.
  Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, 100> control_trajectory;
  control_trajectory = Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, 100>::Random();
  Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, 100> state_trajectory;
  state_trajectory = Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, 100>::Random();

  // Flat copies of the trajectories in the layout the GPU kernel expects
  // (12 state dims, 2 control dims per point).
  std::vector<std::array<float, 12>> s(100);
  std::vector<std::array<float, 12>> s_der(100);
  // steering, throttle
  std::vector<std::array<float, 2>> u(100);
  for (int state_index = 0; state_index < s.size(); state_index++)
  {
    for (int dim = 0; dim < s[0].size(); dim++)
    {
      s[state_index][dim] = state_trajectory.col(state_index)(dim);
    }
    for (int dim = 0; dim < u[0].size(); dim++)
    {
      u[state_index][dim] = control_trajectory.col(state_index)(dim);
    }
  }

  // These variables will be changed so initialized to the right size only

  // Run dynamics on the GPU for each thread-block y-dimension and compare
  // every state-derivative dimension against the CPU result.
  for (int y_dim = 1; y_dim <= 4; y_dim++)
  {
    launchComputeDynamicsTestKernel<BicycleSlipParametric, 12, 2>(dynamics, s, u, s_der, y_dim);
    for (int point = 0; point < 100; point++)
    {
      BicycleSlipParametric::state_array state = state_trajectory.col(point);
      BicycleSlipParametric::control_array control = control_trajectory.col(point);
      BicycleSlipParametric::state_array state_der_cpu = BicycleSlipParametric::state_array::Zero();

      dynamics.computeDynamics(state, control, state_der_cpu);
      for (int dim = 0; dim < 12; dim++)
      {
        EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-5)
            << "at point " << point << " dim " << dim << " with y_dim " << y_dim;
        EXPECT_TRUE(isfinite(s_der[point][dim]));
      }
    }
  }

  dynamics.freeCudaMem();
  CudaCheckError();
}

// Same GPU-vs-CPU computeDynamics comparison as TestModelGPU, but with the
// surface-normals texture populated so the normal-dependent terms are exercised.
TEST_F(BicycleSlipParametricTest, TestModelGPUNormals)
{
  BicycleSlipParametric dynamics = BicycleSlipParametric();

  cudaExtent extent = make_cudaExtent(10, 20, 0);
  TwoDTextureHelper<float4>* helper = dynamics.getTextureHelperNormals();
  helper->setExtent(0, extent);

  // Constant unit-ish normal across the whole 10x20 map: x = 0.2, y = 0.1,
  // z chosen so the components are consistent with the other normal tests.
  std::vector<float4> data_vec;
  data_vec.resize(10 * 20);
  for (int i = 0; i < data_vec.size(); i++)
  {
    data_vec[i] = make_float4(0.2f, 0.1f, 1.0 - sqrt(0.2f * 0.2f + 0.1f * 0.1f), 0.0f);
  }

  std::array<float3, 3>
new_rot_mat{};
  // Rotation that swaps the x and y axes of texture queries.
  new_rot_mat[0] = make_float3(0, 1, 0);
  new_rot_mat[1] = make_float3(1, 0, 0);
  new_rot_mat[2] = make_float3(0, 0, 1);
  helper->updateRotation(0, new_rot_mat);
  helper->updateOrigin(0, make_float3(1, 2, 3));

  helper->updateTexture(0, data_vec);
  helper->updateResolution(0, 10);
  helper->enableTexture(0);
  helper->copyToDevice(true);

  dynamics.GPUSetup();
  CudaCheckError();

  // Random trajectories used as identical inputs on both CPU and GPU.
  Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, 100> control_trajectory;
  control_trajectory = Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, 100>::Random();
  Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, 100> state_trajectory;
  state_trajectory = Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, 100>::Random();

  // Flat copies in the layout the GPU kernel expects (12 state, 2 control dims).
  std::vector<std::array<float, 12>> s(100);
  std::vector<std::array<float, 12>> s_der(100);
  // steering, throttle
  std::vector<std::array<float, 2>> u(100);
  for (int state_index = 0; state_index < s.size(); state_index++)
  {
    for (int dim = 0; dim < s[0].size(); dim++)
    {
      s[state_index][dim] = state_trajectory.col(state_index)(dim);
    }
    for (int dim = 0; dim < u[0].size(); dim++)
    {
      u[state_index][dim] = control_trajectory.col(state_index)(dim);
    }
  }

  // These variables will be changed so initialized to the right size only

  // Run dynamics on the GPU for each thread-block y-dimension and compare
  // every state-derivative dimension against the CPU result.
  for (int y_dim = 1; y_dim <= 4; y_dim++)
  {
    launchComputeDynamicsTestKernel<BicycleSlipParametric, 12, 2>(dynamics, s, u, s_der, y_dim);
    for (int point = 0; point < 100; point++)
    {
      BicycleSlipParametric::state_array state = state_trajectory.col(point);
      BicycleSlipParametric::control_array control = control_trajectory.col(point);
      BicycleSlipParametric::state_array state_der_cpu = BicycleSlipParametric::state_array::Zero();

      dynamics.computeDynamics(state, control, state_der_cpu);
      for (int dim = 0; dim < 12;
        dim++)
      {
        EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-5)
            << "at point " << point << " dim " << dim << " with y_dim " << y_dim;
        EXPECT_TRUE(isfinite(s_der[point][dim]));
      }
    }
  }

  dynamics.freeCudaMem();
  CudaCheckError();
}

// Same as TestModelGPUNormals but with gear_sign = -1, exercising the
// reverse-gear path of the parametric dynamics on both CPU and GPU.
TEST_F(BicycleSlipParametricTest, TestModelGPUNormalsReverse)
{
  BicycleSlipParametric dynamics = BicycleSlipParametric();
  auto params = dynamics.getParams();
  params.gear_sign = -1;  // drive in reverse
  dynamics.setParams(params);

  cudaExtent extent = make_cudaExtent(10, 20, 0);
  TwoDTextureHelper<float4>* helper = dynamics.getTextureHelperNormals();
  helper->setExtent(0, extent);

  // Constant normal across the whole 10x20 map (same values as the forward test).
  std::vector<float4> data_vec;
  data_vec.resize(10 * 20);
  for (int i = 0; i < data_vec.size(); i++)
  {
    data_vec[i] = make_float4(0.2f, 0.1f, 1.0 - sqrt(0.2f * 0.2f + 0.1f * 0.1f), 0.0f);
  }

  // Rotation that swaps the x and y axes of texture queries.
  std::array<float3, 3> new_rot_mat{};
  new_rot_mat[0] = make_float3(0, 1, 0);
  new_rot_mat[1] = make_float3(1, 0, 0);
  new_rot_mat[2] = make_float3(0, 0, 1);
  helper->updateRotation(0, new_rot_mat);
  helper->updateOrigin(0, make_float3(1, 2, 3));

  helper->updateTexture(0, data_vec);
  helper->updateResolution(0, 10);
  helper->enableTexture(0);
  helper->copyToDevice(true);

  dynamics.GPUSetup();
  CudaCheckError();

  // Random trajectories used as identical inputs on both CPU and GPU.
  Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, 100> control_trajectory;
  control_trajectory = Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, 100>::Random();
  Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, 100> state_trajectory;
  state_trajectory = Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, 100>::Random();

  // Flat copies in the layout the GPU kernel expects (12 state, 2 control dims).
  std::vector<std::array<float, 12>> s(100);
  std::vector<std::array<float, 12>> s_der(100);
  // steering, throttle
  std::vector<std::array<float, 2>> u(100);
  for (int state_index = 0; state_index < s.size(); state_index++)
  {
    for (int dim = 0; dim < s[0].size(); dim++)
    {
s[state_index][dim] = state_trajectory.col(state_index)(dim);
    }
    for (int dim = 0; dim < u[0].size(); dim++)
    {
      u[state_index][dim] = control_trajectory.col(state_index)(dim);
    }
  }

  // These variables will be changed so initialized to the right size only

  // Run dynamics on the GPU for each thread-block y-dimension and compare
  // every state-derivative dimension against the CPU result.
  for (int y_dim = 1; y_dim <= 4; y_dim++)
  {
    launchComputeDynamicsTestKernel<BicycleSlipParametric, 12, 2>(dynamics, s, u, s_der, y_dim);
    for (int point = 0; point < 100; point++)
    {
      BicycleSlipParametric::state_array state = state_trajectory.col(point);
      BicycleSlipParametric::control_array control = control_trajectory.col(point);
      BicycleSlipParametric::state_array state_der_cpu = BicycleSlipParametric::state_array::Zero();

      dynamics.computeDynamics(state, control, state_der_cpu);
      for (int dim = 0; dim < 12; dim++)
      {
        EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-5)
            << "at point " << point << " dim " << dim << " with y_dim " << y_dim;
        EXPECT_TRUE(isfinite(s_der[point][dim]));
      }
    }
  }

  dynamics.freeCudaMem();
  CudaCheckError();
}

// Checks updateState: starting from a zero state with a derivative of all ones
// and dt = 0.1, the integrated dimensions must land at 0.1 while next_state is
// pre-filled with NaN to catch any dimension updateState forgets to write.
TEST_F(BicycleSlipParametricTest, TestUpdateState)
{
  CudaCheckError();
  BicycleSlipParametric dynamics = BicycleSlipParametric();
  BicycleSlipParametric::state_array state = BicycleSlipParametric::state_array::Zero();
  // NaN sentinel: any state dimension updateState fails to overwrite will fail
  // the EXPECTs below.
  BicycleSlipParametric::state_array next_state = BicycleSlipParametric::state_array::Ones() * NAN;
  BicycleSlipParametric::state_array state_der = BicycleSlipParametric::state_array::Ones();

  // TODO add in the elevation map

  dynamics.updateState(state, next_state, state_der, 0.1);
  EXPECT_FLOAT_EQ(next_state(S_IND_CLASS(BicycleSlipParametricParams, POS_X)), 0.1f);
  EXPECT_FLOAT_EQ(next_state(S_IND_CLASS(BicycleSlipParametricParams, POS_Y)), 0.1f);
  EXPECT_NEAR(next_state(S_IND_CLASS(BicycleSlipParametricParams, YAW)), 0.1f, 1.0e-5);
EXPECT_FLOAT_EQ(next_state(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE)), 0.1f);
  EXPECT_FLOAT_EQ(next_state(S_IND_CLASS(BicycleSlipParametricParams, BRAKE_STATE)), 0.1f);
  EXPECT_FLOAT_EQ(next_state(S_IND_CLASS(BicycleSlipParametricParams, VEL_X)), 0.1f);
  EXPECT_FLOAT_EQ(next_state(S_IND_CLASS(BicycleSlipParametricParams, VEL_Y)), 0.1f);
  EXPECT_FLOAT_EQ(next_state(S_IND_CLASS(BicycleSlipParametricParams, OMEGA_Z)), 0.1f);
  // Per these expectations ROLL/PITCH come out 0 and STEER_ANGLE_RATE equals the
  // derivative (1.0), i.e. they are not Euler-integrated like the dims above.
  EXPECT_FLOAT_EQ(next_state(S_IND_CLASS(BicycleSlipParametricParams, ROLL)), 0);
  EXPECT_FLOAT_EQ(next_state(S_IND_CLASS(BicycleSlipParametricParams, PITCH)), 0);
  EXPECT_FLOAT_EQ(next_state(S_IND_CLASS(BicycleSlipParametricParams, STEER_ANGLE_RATE)), 1.0f);

  CudaCheckError();
}

// Checks that the full step() (dynamics + integration) agrees between GPU and
// CPU with both the elevation-map texture and the normals texture enabled, for
// thread-block y-dimensions 1 through 16.
// NOTE(review): num_rollouts is 1 here while the commented-out reverse variant
// below uses 2000 — possibly left small during debugging; confirm intended.
TEST_F(BicycleSlipParametricTest, TestStepGPUvsCPU)
{
  const int num_rollouts = 1;
  const float dt = 0.1f;
  CudaCheckError();
  BicycleSlipParametric dynamics = BicycleSlipParametric();

  cudaExtent extent = make_cudaExtent(10, 20, 0);
  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();
  helper->setExtent(0, extent);

  // Elevation texture: simple increasing ramp over the 10x20 map.
  std::vector<float> data_vec;
  data_vec.resize(10 * 20);
  for (int i = 0; i < data_vec.size(); i++)
  {
    data_vec[i] = i * 1.0f;
  }

  // Rotation that swaps the x and y axes of texture queries.
  std::array<float3, 3> new_rot_mat{};
  new_rot_mat[0] = make_float3(0, 1, 0);
  new_rot_mat[1] = make_float3(1, 0, 0);
  new_rot_mat[2] = make_float3(0, 0, 1);
  helper->updateRotation(0, new_rot_mat);
  helper->updateOrigin(0, make_float3(1, 2, 3));

  helper->updateTexture(0, data_vec);
  helper->updateResolution(0, 10);
  helper->enableTexture(0);
  helper->copyToDevice(true);

  // Normals texture: constant normal across the map (same values as the
  // computeDynamics normal tests).
  std::vector<float4> normal_data_vec;
  normal_data_vec.resize(10 * 20);
  for (int i = 0; i < normal_data_vec.size(); i++)
  {
    normal_data_vec[i] = make_float4(0.2f, 0.1f, 1.0 - sqrt(0.2f * 0.2f + 0.1f * 0.1f), 0.0f);
  }

  TwoDTextureHelper<float4>* normal_helper = dynamics.getTextureHelperNormals();
  normal_helper->setExtent(0, extent);
  normal_helper->updateRotation(0, new_rot_mat);
  normal_helper->updateOrigin(0, make_float3(1, 2, 3));

  normal_helper->updateTexture(0, normal_data_vec);
  normal_helper->updateResolution(0, 10);
  normal_helper->enableTexture(0);
  normal_helper->copyToDevice(true);

  CudaCheckError();
  dynamics.GPUSetup();
  CudaCheckError();

  // Random trajectories used as identical inputs on both CPU and GPU.
  Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, num_rollouts> control_trajectory;
  control_trajectory = Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, num_rollouts>::Random();
  Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, num_rollouts> state_trajectory;
  state_trajectory = Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, num_rollouts>::Random();

  // Flat GPU-side buffers: input state, stepped state, and state derivative.
  std::vector<std::array<float, BicycleSlipParametric::STATE_DIM>> s(num_rollouts);
  std::vector<std::array<float, BicycleSlipParametric::STATE_DIM>> s_next(num_rollouts);
  std::vector<std::array<float, BicycleSlipParametric::STATE_DIM>> s_der(num_rollouts);
  // steering, throttle
  std::vector<std::array<float, BicycleSlipParametric::CONTROL_DIM>> u(num_rollouts);

  BicycleSlipParametric::state_array state;
  BicycleSlipParametric::state_array next_state_cpu;
  BicycleSlipParametric::control_array control;
  BicycleSlipParametric::output_array output;
  BicycleSlipParametric::state_array state_der_cpu = BicycleSlipParametric::state_array::Zero();

  // Run step() on the GPU for each thread-block y-dimension; the inputs are
  // refilled every iteration because the kernel overwrites them.
  for (int y_dim = 1; y_dim <= 16; y_dim++)
  {
    for (int state_index = 0; state_index < num_rollouts; state_index++)
    {
      for (int dim = 0; dim < s[0].size(); dim++)
      {
        s[state_index][dim] = state_trajectory.col(state_index)(dim);
      }
      for (int dim = 0; dim < u[0].size(); dim++)
      {
        u[state_index][dim] = control_trajectory.col(state_index)(dim);
      }
    }

    launchStepTestKernel<BicycleSlipParametric>(dynamics, s, u, s_der, s_next, 0, dt, y_dim);
    for
(int point = 0; point < num_rollouts; point++)\n    {\n      state = state_trajectory.col(point);\n      control = control_trajectory.col(point);\n      state_der_cpu = BicycleSlipParametric::state_array::Zero();\n\n      dynamics.step(state, next_state_cpu, state_der_cpu, control, output, 0, dt);\n      // -1 for the filler state to get 4 aligned states\n      for (int dim = 0; dim < BicycleSlipParametric::STATE_DIM - 1; dim++)\n      {\n        EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-4)\n            << \"at point \" << point << \" with y_dim \" << y_dim << \" at dim \" << dim;\n        // EXPECT_NEAR(state(dim), s[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        EXPECT_NEAR(next_state_cpu(dim), s_next[point][dim], 1e-4)\n            << \"at point \" << point << \" with y_dim \" << y_dim << \" at dim \" << dim;\n        EXPECT_TRUE(isfinite(s_next[point][dim]));\n      }\n    }\n  }\n  dynamics.freeCudaMem();\n}\n//\n// TEST_F(BicycleSlipParametricTest, TestStepReverse)\n// {\n//   CudaCheckError();\n//   using DYN = BicycleSlipParametric;\n//   const float tol = 1e-6;\n//   DYN dynamics = DYN();\n//   auto params = dynamics.getParams();\n//   params.c_0 = 0;\n//   params.c_b[0] = 1;\n//   params.c_b[1] = 10;\n//   params.c_b[2] = 100;\n//   params.c_v[0] = 0.25;\n//   params.c_v[1] = 0.5;\n//   params.c_v[2] = 0.75;\n//   params.c_t[0] = 2;\n//   params.c_t[1] = 20;\n//   params.c_t[2] = 200;\n//   params.low_min_throttle = 0.2;\n//   params.steer_command_angle_scale = 0.5;\n//   params.steering_constant = 0.5;\n//   params.wheel_base = 0.5;\n//   params.max_steer_rate = 5;\n//   params.gear_sign = -1;\n//   dynamics.setParams(params);\n//   DYN::state_array state;\n//   DYN::state_array next_state;\n//   DYN::state_array state_der = DYN::state_array::Zero();\n//   DYN::control_array control;\n//   DYN::output_array output;\n//   float dt = 0.1;\n//   // TODO add in the elevation map\n//\n//   // Basic initial 
state and no movement should stay still\n//   state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n//   control << 0, 0;\n//   dynamics.step(state, next_state, state_der, control, output, 0, dt);\n//   EXPECT_TRUE(state_der == DYN::state_array::Zero());\n//   EXPECT_NEAR(next_state(0), 0.0, tol);\n//   EXPECT_NEAR(next_state(1), 0.0, tol);\n//   EXPECT_NEAR(next_state(2), 0.0, tol);\n//   EXPECT_NEAR(next_state(3), 0.0, tol);\n//   EXPECT_NEAR(next_state(4), 0.0, tol);\n//   EXPECT_NEAR(next_state(5), 0.0, tol);\n//   EXPECT_NEAR(next_state(6), 0.0, tol);\n//   EXPECT_NEAR(next_state(7), 0.0, tol);\n//   EXPECT_NEAR(next_state(8), 0.0, tol);\n//   EXPECT_NEAR(output(23), 0.0, tol);\n//\n//   // Apply full throttle from zero state\n//   state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n//   control << 1, 0;\n//   dynamics.step(state, next_state, state_der, control, output, 0, dt);\n//   EXPECT_NEAR(state_der(0), -1.6, tol);\n//   EXPECT_NEAR(next_state(0), -0.16, tol);\n//   EXPECT_NEAR(next_state(1), 0.0, tol);\n//   EXPECT_NEAR(next_state(2), 0.0, tol);\n//   EXPECT_NEAR(next_state(3), 0.0, tol);\n//   EXPECT_NEAR(next_state(4), 0.0, tol);\n//   EXPECT_NEAR(next_state(5), 0.0, tol);\n//   EXPECT_NEAR(next_state(6), 0.0, tol);\n//   EXPECT_NEAR(next_state(7), 0.0, tol);\n//   EXPECT_NEAR(next_state(8), 0.0, tol);\n//   EXPECT_NEAR(output(23), -1.6, tol);\n//\n//   // Apply throttle to a state with positive velocity\n//   state << 1, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n//   control << 1, 0;\n//   dynamics.step(state, next_state, state_der, control, output, 0, dt);\n//   EXPECT_NEAR(state_der(0), -5.5, tol);\n//   EXPECT_NEAR(next_state(0), 0.45, tol);\n//   EXPECT_NEAR(next_state(1), 0.0, tol);\n//   EXPECT_NEAR(next_state(2), 0.1, tol);\n//   EXPECT_NEAR(next_state(3), 0.0, tol);\n//   EXPECT_NEAR(next_state(4), 0.0, tol);\n//   EXPECT_NEAR(next_state(5), 0.0, tol);\n//   EXPECT_NEAR(next_state(6), 0.0, tol);\n//   EXPECT_NEAR(next_state(7), 0.0, tol);\n//   EXPECT_NEAR(next_state(8), 0.0, 
tol);\n//   EXPECT_NEAR(output(23), -5.5, tol);\n//   EXPECT_NEAR(output(24), 0.0, tol);\n//\n//   // Apply full throttle and half left turn to origin state\n//   state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n//   control << 1, 0.5;\n//   dynamics.step(state, next_state, state_der, control, output, 0, dt);\n//   EXPECT_NEAR(state_der(0), -1.6, tol);\n//   EXPECT_NEAR(next_state(0), -0.16, tol);\n//   EXPECT_NEAR(next_state(1), 0.0, tol);\n//   EXPECT_NEAR(next_state(2), 0.0, tol);\n//   EXPECT_NEAR(next_state(3), 0.0, tol);\n//   EXPECT_NEAR(next_state(4), powf(0.5, 3) * dt, tol);\n//   EXPECT_NEAR(next_state(5), 0.0, tol);\n//   EXPECT_NEAR(next_state(6), 0.0, tol);\n//   EXPECT_NEAR(next_state(7), 0.0, tol);\n//   EXPECT_NEAR(next_state(8), powf(0.5, 3), tol);\n//   EXPECT_NEAR(output(23), -1.6, tol);\n//\n//   // Apply full throttle and half left turn to a moving state oriented 30 degrees to the left\n//   float yaw = M_PI / 6;\n//   state << 1.0, yaw, 0, 0, 0, -0.0, 0.0, 0, 0;\n//   control << 1, 0.5;\n//   dynamics.step(state, next_state, state_der, control, output, 0, dt);\n//   EXPECT_NEAR(state_der(0), -5.5, tol);\n//   EXPECT_NEAR(next_state(0), 0.45, tol);\n//   EXPECT_NEAR(next_state(1), yaw, tol);\n//   EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(4), powf(0.5, 3) * dt, tol);\n//   EXPECT_NEAR(next_state(5), 0.0, tol);\n//   EXPECT_NEAR(next_state(6), 0.0, tol);\n//   EXPECT_NEAR(next_state(7), 0.0, tol);\n//   EXPECT_NEAR(next_state(8), powf(0.5, 3), tol);\n//   EXPECT_NEAR(output(23), -5.5, tol);\n//\n//   // Apply full throttle and half left turn to a moving state oriented 30 degrees to the left which is already\n//   turning float steer_angle = M_PI / 8; state << 1.0, yaw, 0, 0, steer_angle, -0.0, 0.0, 0, 0; control << 1, 0.5;\n//   dynamics.step(state, next_state, state_der, control, output, 0, dt);\n//   EXPECT_NEAR(state_der(0), -5.5, tol);\n//   
EXPECT_NEAR(next_state(0), 0.45, tol);\n//   EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n//   EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n//   EXPECT_NEAR(next_state(5), 0.0, tol);\n//   EXPECT_NEAR(next_state(6), 0.0, tol);\n//   EXPECT_NEAR(next_state(7), 0.0, tol);\n//   EXPECT_NEAR(next_state(8), (0.25 - steer_angle) * 0.5, tol);\n//   EXPECT_NEAR(output(23), -5.5, tol);\n//\n//   // Apply full brake and half left turn to a moving state oriented 30 degrees to the left which is already turning\n//   state << 1.0, yaw, 0, 0, steer_angle, 1.0, 0, 0.0, 0, 0;\n//   control << -1, 0.5;\n//   dynamics.step(state, next_state, state_der, control, output, 0, dt);\n//   EXPECT_NEAR(state_der(0), -5.5, tol);\n//   EXPECT_NEAR(next_state(0), 1 - 5.5 * dt, tol);\n//   EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n//   EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n//   EXPECT_NEAR(next_state(5), 1.0, tol);\n//   EXPECT_NEAR(next_state(6), 0.0, tol);\n//   EXPECT_NEAR(next_state(7), 0.0, tol);\n//   EXPECT_NEAR(next_state(8), (0.25 - steer_angle) * 0.5, tol);\n//   EXPECT_NEAR(output(23), -5.5, tol);\n//\n//   /**\n//    * Apply full brake and half left turn to a moving state oriented 30 degrees to the left which is already turning\n//    * and on a downward facing hill\n//    */\n//   float pitch = 20 * M_PI / 180;\n//   state << 1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n//   control << -1, 0.5;\n//   dynamics.step(state, next_state, state_der, control, output, 0, dt);\n//   EXPECT_NEAR(next_state(0), 1 + (-5.5 + 9.81 * sinf(pitch)) * dt, tol);\n//   EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n//   EXPECT_NEAR(next_state(2), 1 
* cos(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n//   EXPECT_NEAR(next_state(5), 1.0, tol);\n//   EXPECT_NEAR(next_state(6), 0.0, tol);\n//   EXPECT_NEAR(next_state(7), 0.0, tol);\n//   EXPECT_NEAR(next_state(8), (0.25 - steer_angle) * 0.5, tol);\n//   EXPECT_NEAR(output(23), (-5.5 + 9.81 * sinf(pitch)), tol);\n//\n//   /**\n//    * Apply full brake and half left turn to a backwards moving state oriented 30 degrees to the left which is already\n//    * turning and on a downward facing hill\n//    */\n//   state << -1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n//   control << -1, 0.5;\n//   dynamics.step(state, next_state, state_der, control, output, 0, dt);\n//   EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n//   EXPECT_NEAR(next_state(1), yaw + 0.086361105 * dt, tol);\n//   EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(3), -1 * sin(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n//   EXPECT_NEAR(next_state(5), 1.0, tol);\n//   EXPECT_NEAR(next_state(6), 0.0, tol);\n//   EXPECT_NEAR(next_state(7), 0.0, tol);\n//   EXPECT_NEAR(next_state(8), (0.25 - steer_angle) * 0.5, tol);\n//   EXPECT_NEAR(output(23), (5.5 + 9.81 * sinf(pitch)), tol);\n//\n//   /**\n//    * Apply full brake and half right turn to a backwards moving state oriented 30 degrees to the left which is\n//    already\n//    * turning and on a downward facing hill\n//    */\n//   state << -1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n//   control << -1, -0.5;\n//   dynamics.step(state, next_state, state_der, control, output, 0, dt);\n//   EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n//   EXPECT_NEAR(next_state(1), yaw + 0.086361105 * dt, tol);\n//   EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(3), 
-1 * sin(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(4), steer_angle + (-0.25 - steer_angle) * 0.5 * dt, tol);\n//   EXPECT_NEAR(next_state(5), 1.0, tol);\n//   EXPECT_NEAR(next_state(6), 0.0, tol);\n//   EXPECT_NEAR(next_state(7), 0.0, tol);\n//   EXPECT_NEAR(next_state(8), (-0.25 - steer_angle) * 0.5, tol);\n//   EXPECT_NEAR(output(23), (5.5 + 9.81 * sinf(pitch)), tol);\n//\n//   /**\n//    * Apply full brake and half right turn to a backwards moving state with a huge steering angle to test max steer\n//    * angle and steering rate. We are also on a downward facing hill and are already oriented 30 degrees to the left\n//    */\n//   steer_angle *= 100;\n//   state << -1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n//   control << -1, -0.5;\n//   dynamics.step(state, next_state, state_der, control, output, 0, dt);\n//   EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n//   EXPECT_NEAR(next_state(1), yaw + tan(steer_angle / -9.1) * dt * -2, tol);\n//   EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(3), -1 * sin(yaw) * dt, tol);\n//   EXPECT_NEAR(next_state(4), params.max_steer_angle, tol);\n//   EXPECT_NEAR(next_state(5), 1.0, tol);\n//   EXPECT_NEAR(next_state(6), 0.0, tol);\n//   EXPECT_NEAR(next_state(7), 0.0, tol);\n//   EXPECT_NEAR(next_state(8), -params.max_steer_rate, tol);\n//   EXPECT_NEAR(output(23), (5.5 + 9.81 * sinf(pitch)), tol);\n// }\n//\n// TEST_F(BicycleSlipParametricTest, TestStepGPUvsCPUReverse)\n// {\n//   const int num_rollouts = 2000;\n//   const float dt = 0.1f;\n//   CudaCheckError();\n//   BicycleSlipParametric dynamics = BicycleSlipParametric();\n//   auto params = dynamics.getParams();\n//   params.gear_sign = -1;\n//   dynamics.setParams(params);\n//\n//   cudaExtent extent = make_cudaExtent(10, 20, 0);\n//   TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n//   helper->setExtent(0, extent);\n//\n//   std::vector<float> data_vec;\n//   data_vec.resize(10 * 
20);\n//   for (int i = 0; i < data_vec.size(); i++)\n//   {\n//     data_vec[i] = i * 1.0f;\n//   }\n//\n//   std::array<float3, 3> new_rot_mat{};\n//   new_rot_mat[0] = make_float3(0, 1, 0);\n//   new_rot_mat[1] = make_float3(1, 0, 0);\n//   new_rot_mat[2] = make_float3(0, 0, 1);\n//   helper->updateRotation(0, new_rot_mat);\n//   helper->updateOrigin(0, make_float3(1, 2, 3));\n//\n//   helper->updateTexture(0, data_vec);\n//   helper->updateResolution(0, 10);\n//   helper->enableTexture(0);\n//   helper->copyToDevice(true);\n//\n//   CudaCheckError();\n//   dynamics.GPUSetup();\n//   CudaCheckError();\n//\n//   Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, num_rollouts> control_trajectory;\n//   control_trajectory = Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, num_rollouts>::Random();\n//   Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, num_rollouts> state_trajectory;\n//   state_trajectory = Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, num_rollouts>::Random();\n//\n//   std::vector<std::array<float, BicycleSlipParametric::STATE_DIM>> s(num_rollouts);\n//   std::vector<std::array<float, BicycleSlipParametric::STATE_DIM>> s_next(num_rollouts);\n//   std::vector<std::array<float, BicycleSlipParametric::STATE_DIM>> s_der(num_rollouts);\n//   // steering, throttle\n//   std::vector<std::array<float, BicycleSlipParametric::CONTROL_DIM>> u(num_rollouts);\n//\n//   BicycleSlipParametric::state_array state;\n//   BicycleSlipParametric::state_array next_state_cpu;\n//   BicycleSlipParametric::control_array control;\n//   BicycleSlipParametric::output_array output;\n//   BicycleSlipParametric::state_array state_der_cpu = BicycleSlipParametric::state_array::Zero();\n//\n//   // Run dynamics on dynamicsU\n//   // Run dynamics on GPU\n//   for (int y_dim = 1; y_dim <= 16; y_dim++)\n//   {\n//     for (int state_index = 0; state_index < num_rollouts; state_index++)\n//     {\n//       for (int dim = 0; dim < s[0].size(); dim++)\n//     
  {\n//         s[state_index][dim] = state_trajectory.col(state_index)(dim);\n//       }\n//       for (int dim = 0; dim < u[0].size(); dim++)\n//       {\n//         u[state_index][dim] = control_trajectory.col(state_index)(dim);\n//       }\n//     }\n//\n//     launchStepTestKernel<BicycleSlipParametric>(dynamics, s, u, s_der, s_next, 0, dt, y_dim);\n//     for (int point = 0; point < num_rollouts; point++)\n//     {\n//       state = state_trajectory.col(point);\n//       control = control_trajectory.col(point);\n//       state_der_cpu = BicycleSlipParametric::state_array::Zero();\n//\n//       dynamics.step(state, next_state_cpu, state_der_cpu, control, output, 0, dt);\n//       for (int dim = 0; dim < BicycleSlipParametric::STATE_DIM; dim++)\n//       {\n//         EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n//         // EXPECT_NEAR(state(dim), s[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n//         EXPECT_NEAR(next_state_cpu(dim), s_next[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" <<\n//         y_dim; EXPECT_TRUE(isfinite(s_next[point][dim]));\n//       }\n//     }\n//   }\n//   dynamics.freeCudaMem();\n// }\n//\n// TEST_F(BicycleSlipParametricTest, ComputeStateTrajectoryFiniteTest)\n// {\n//   BicycleSlipParametric dynamics = BicycleSlipParametric();\n//   using PARAMS = BicycleSlipParametric::DYN_PARAMS_T;\n//   PARAMS params;\n//   params.c_t[0] = 3.0;\n//   params.c_b[0] = 0.2;\n//   params.c_v[0] = 0.2;\n//   params.c_0 = 0.2;\n//   params.wheel_base = 3.0;\n//   params.steering_constant = 1.0;\n//   dynamics.setParams(params);\n//\n//   Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, 500> control_trajectory;\n//   control_trajectory = Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, 500>::Zero();\n//   Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, 500> state_trajectory;\n//   state_trajectory = 
Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, 500>::Zero();\n//   BicycleSlipParametric::state_array state_der;\n//   BicycleSlipParametric::state_array x, x_next;\n//   BicycleSlipParametric::output_array output;\n//   x << 0, 1.46919e-6, 0.0140179, 1.09739e-8, -0.000735827;\n//\n//   for (int i = 0; i < 500; i++)\n//   {\n//     BicycleSlipParametric::control_array u = control_trajectory.col(i);\n//     dynamics.step(x, x_next, state_der, u, output, i, 0.02);\n//     dynamics.computeDynamics(x, u, state_der);\n//     EXPECT_TRUE(x.allFinite());\n//     EXPECT_TRUE(x_next.allFinite());\n//     EXPECT_TRUE(state_der.allFinite());\n//     EXPECT_TRUE(u.allFinite());\n//     EXPECT_TRUE(state_der != BicycleSlipParametric::state_array::Zero());\n//     x = x_next;\n//   }\n//   params.steering_constant = 0.5;\n//   dynamics.setParams(params);\n//\n//   x << 0, 1.46919e-6, 0.0140179, 1.09739e-8, -1.0;\n//   for (int i = 0; i < 500; i++)\n//   {\n//     BicycleSlipParametric::control_array u = control_trajectory.col(i);\n//     dynamics.step(x, x_next, state_der, u, output, i, 0.02);\n//     dynamics.computeDynamics(x, u, state_der);\n//     EXPECT_TRUE(x.allFinite());\n//     EXPECT_TRUE(x_next.allFinite());\n//     EXPECT_TRUE(state_der.allFinite());\n//     EXPECT_TRUE(u.allFinite());\n//     EXPECT_TRUE(state_der != BicycleSlipParametric::state_array::Zero());\n//     x = x_next;\n//   }\n// }\n//\n// // class LinearDummy : public BicycleSlipParametric {\n// // public:\n// //   bool computeGrad(const Eigen::Ref<const state_array> & state,\n// //                    const Eigen::Ref<const control_array>& control,\n// //                    Eigen::Ref<dfdx> A,\n// //                    Eigen::Ref<dfdu> B) {\n// //     return false;\n// //   };\n// // };\n//\n// // TEST_F(BicycleSlipParametricTest, TestComputeGradComputation) {\n// //   Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM,\n// //                 BicycleSlipParametric::STATE_DIM + 
BicycleSlipParametric::CONTROL_DIM>\n// //       numeric_jac;\n// //   Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM,\n// //                 BicycleSlipParametric::STATE_DIM + BicycleSlipParametric::CONTROL_DIM>\n// //       analytic_jac;\n// //\n// //   const int num_rollouts = 100;\n// //   Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, num_rollouts> control_trajectory;\n// //   control_trajectory = Eigen::Matrix<float, BicycleSlipParametric::CONTROL_DIM, num_rollouts>::Random();\n// //   Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, num_rollouts> state_trajectory;\n// //   state_trajectory = Eigen::Matrix<float, BicycleSlipParametric::STATE_DIM, num_rollouts>::Random();\n// //\n// //   // TODO properly scale the random values\n// //\n// //   auto analytic_grad_model = BicycleSlipParametric();\n// //\n// //   BicycleSlipParametric::dfdx A_analytic = BicycleSlipParametric::dfdx::Zero();\n// //   BicycleSlipParametric::dfdu B_analytic = BicycleSlipParametric::dfdu::Zero();\n// //\n// //   auto numerical_grad_model = LinearDummy();\n// //\n// //   std::shared_ptr<ModelWrapperDDP<LinearDummy>> ddp_model =\n// //       std::make_shared<ModelWrapperDDP<LinearDummy>>(&numerical_grad_model);\n// //\n// //   auto params = analytic_grad_model.getParams();\n// //   // params.c_t[0] = 3.0;\n// //   // params.c_b[0] = 0.2;\n// //   // params.c_v[0] = 0.2;\n// //   params.c_0 = 0.0;\n// //   params.wheel_base = 3.0;\n// //   params.steering_constant = 1.1;\n// //   params.low_min_throttle = 0.0;\n// //   params.max_brake_rate_pos = 10.0;\n// //   analytic_grad_model.setParams(params);\n// //   numerical_grad_model.setParams(params);\n// //\n// //   auto limits = analytic_grad_model.getControlRanges();\n// //   limits[0].x = -0.3;\n// //   limits[0].y = 1.0;\n// //   limits[1].x = -1.0;\n// //   limits[1].y = 1.0;\n// //   analytic_grad_model.setControlRanges(limits);\n// //   numerical_grad_model.setControlRanges(limits);\n// //\n// //   
state_trajectory.col(0) << 5, M_PI_4, 1, 1, 3, 0, 0, 0, 2;\n// //   control_trajectory.col(0) << 0.5, 1;\n// //\n// //   state_trajectory.col(1) << 5, M_PI_4, 1, 1, 3, 0, 0, 0, 2;\n// //   control_trajectory.col(1) << -1.0, 1;\n// //\n// //   // state_trajectory.col(5) = state_trajectory.col(5).cwiseAbs();\n// //\n// //   for (int i = 0; i < num_rollouts; i++)\n// //   {\n// //     BicycleSlipParametric::state_array state = state_trajectory.col(i);\n// //     BicycleSlipParametric::control_array control = control_trajectory.col(i);\n// //\n// //     if (abs(state(0)) < 1)\n// //     {\n// //       state(0) = state(0) * 10;\n// //       state(1) = state(1) * M_PI;\n// //       state(2) = state(2) * 100;\n// //       state(3) = state(3) * 100;\n// //       state(4) = state(4) * 5;\n// //       state(6) = state(6) * M_PI_2;\n// //       state(7) = state(7) * M_PI_2;\n// //       state(8) = state(8) * 10;\n// //     }\n// //     state(5) = min(abs(state(5) / 0.33f), 0.3f);\n// //\n// //     // std::cout << \"iteration \" << i << std::endl;\n// //     // std::cout << \"state: \" << state.transpose() << std::endl;\n// //     // std::cout << \"control: \" << control.transpose() << std::endl;\n// //\n// //     bool analytic_grad = analytic_grad_model.computeGrad(state, control, A_analytic, B_analytic);\n// //     EXPECT_TRUE(analytic_grad);\n// //\n// //     analytic_jac.leftCols<BicycleSlipParametric::STATE_DIM>() = A_analytic;\n// //     analytic_jac.rightCols<BicycleSlipParametric::CONTROL_DIM>() = B_analytic;\n// //     numeric_jac = ddp_model->df(state, control);\n// //\n// //     EXPECT_LT((numeric_jac - analytic_jac).norm(), 5e-2)\n// //         << \"at index \" << i << \"\\nstate: \" << state.transpose() << \"\\ncontrol \" << control.transpose()\n// //         << \"\\nNumeric Jacobian\\n\"\n// //         << numeric_jac << \"\\nAnalytic Jacobian\\n\"\n// //         << analytic_jac;\n// //   }\n// // }\n"
  },
  {
    "path": "tests/dynamics/cartpole_dynamics_tests.cu",
    "content": "//\n// Created by mgandhi3 on 10/4/19.\n//\n\n#include <Eigen/Dense>\n#include <gtest/gtest.h>\n#include <kernel_tests/dynamics/cartpole/cartpole_dynamics_kernel_test.cuh>\n#include <cuda_runtime.h>\n#include <mppi/ddp/ddp_model_wrapper.h>\n#include <memory>\n\nTEST(CartPole, StateDim)\n{\n  auto CP = CartpoleDynamics(1, 1, 1);\n  EXPECT_EQ(4, CartpoleDynamics::STATE_DIM);\n}\n\nTEST(CartPole, ControlDim)\n{\n  auto CP = CartpoleDynamics(1, 1, 1);\n  EXPECT_EQ(1, CartpoleDynamics::CONTROL_DIM);\n}\n\nTEST(CartPole, Equilibrium)\n{\n  auto CP = CartpoleDynamics(1, 1, 1);\n\n  CartpoleDynamics::state_array state;\n  state << 0, 0, 0, 0;\n\n  CartpoleDynamics::control_array control;\n  control << 0;\n\n  CartpoleDynamics::state_array state_dot_compute;\n  state_dot_compute << 1, 1, 1, 1;\n\n  CartpoleDynamics::state_array state_dot_known;\n  state_dot_known << 0, 0, 0, 0;\n\n  CP.computeDynamics(state, control, state_dot_compute);\n  for (int i = 0; i < CartpoleDynamics::STATE_DIM; i++)\n  {\n    EXPECT_NEAR(state_dot_known(i), state_dot_compute(i), 1e-4) << \"Failed at index: \" << i;\n  }\n}\n\nTEST(CartPole, BindStream)\n{\n  cudaStream_t stream;\n\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  auto CP = CartpoleDynamics(1, 1, 2, stream);\n\n  EXPECT_EQ(CP.stream_, stream) << \"Stream binding failure.\";\n\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n}\n\nTEST(CartPole, SetGetParamsHost)\n{\n  auto params = CartpoleDynamicsParams(2.0, 3.0, 4.0);\n  auto CP = CartpoleDynamics(1, 1, 2);\n\n  CP.setParams(params);\n  auto CP_params = CP.getParams();\n\n  EXPECT_FLOAT_EQ(params.cart_mass, CP_params.cart_mass);\n  EXPECT_FLOAT_EQ(params.pole_mass, CP_params.pole_mass);\n  EXPECT_FLOAT_EQ(params.pole_length, CP_params.pole_length);\n}\n\nTEST(CartPole, CartPole_GPUSetup_Test)\n{\n  auto CP_host = new CartpoleDynamics(1, 2, 2);\n  CP_host->GPUSetup();\n  // float mass;\n  // launchParameterTestKernel(*CP_host, mass);\n\n  
EXPECT_TRUE(CP_host->GPUMemStatus_);\n\n  delete (CP_host);\n}\n\nTEST(CartPole, GetCartMassFromGPU)\n{\n  auto CP_host = new CartpoleDynamics(1, 1, 2);\n  CP_host->GPUSetup();\n\n  auto params = CartpoleDynamicsParams(2.0, 3.0, 4.0);\n  CP_host->setParams(params);\n  float mass;\n\n  launchCartMassTestKernel(*CP_host, mass);\n\n  EXPECT_FLOAT_EQ(params.cart_mass, mass);\n\n  CP_host->freeCudaMem();\n  delete (CP_host);\n}\n\nTEST(CartPole, GetPoleMassFromGPU)\n{\n  auto CP_host = new CartpoleDynamics(1, 1, 2);\n  CP_host->GPUSetup();\n\n  auto params = CartpoleDynamicsParams(2.0, 3.0, 4.0);\n  CP_host->setParams(params);\n  float mass;\n\n  launchPoleMassTestKernel(*CP_host, mass);\n\n  EXPECT_FLOAT_EQ(params.pole_mass, mass);\n\n  CP_host->freeCudaMem();\n  delete (CP_host);\n}\n\nTEST(CartPole, GetPoleLengthFromGPU)\n{\n  auto CP_host = new CartpoleDynamics(1, 1, 2);\n  CP_host->GPUSetup();\n\n  auto params = CartpoleDynamicsParams(2.0, 3.0, 4.0);\n  CP_host->setParams(params);\n  float length;\n\n  launchPoleLengthTestKernel(*CP_host, length);\n\n  EXPECT_FLOAT_EQ(params.pole_length, length);\n\n  CP_host->freeCudaMem();\n  delete (CP_host);\n}\n\nTEST(CartPole, GetGravityFromGPU)\n{\n  auto CP_host = new CartpoleDynamics(1, 1, 2);\n  CP_host->GPUSetup();\n\n  auto params = CartpoleDynamicsParams(2.0, 3.0, 4.0);\n  CP_host->setParams(params);\n  float gravity_gpu;\n\n  launchGravityTestKernel(*CP_host, gravity_gpu);\n\n  EXPECT_FLOAT_EQ(CP_host->getGravity(), gravity_gpu);\n\n  CP_host->freeCudaMem();\n  delete (CP_host);\n}\n\nTEST(CartPole, TestDynamicsGPU)\n{\n  auto CP_host = new CartpoleDynamics(1, 1, 2);\n  CP_host->GPUSetup();\n\n  auto params = CartpoleDynamicsParams(2.0, 3.0, 4.0);\n  CP_host->setParams(params);\n\n  CartpoleDynamics::state_array state;\n  state(0) = 0.1;\n  state(1) = 0.3;\n  state(2) = 0.23;\n  state(3) = 0.334;\n  CartpoleDynamics::control_array control;\n  control(0) = 0.654;\n\n  // These variables will be changed so initialized 
to the right size only\n  CartpoleDynamics::state_array state_der_cpu = CartpoleDynamics::state_array::Zero();\n\n  float state_der_gpu[CartpoleDynamics::STATE_DIM];\n\n  // Run dynamics on CPU\n  CP_host->computeDynamics(state, control, state_der_cpu);\n  // Run dynamics on GPU\n  launchDynamicsTestKernel(*CP_host, state.data(), control.data(), state_der_gpu);\n\n  // Compare CPU and GPU Results\n  for (int i = 0; i < CartpoleDynamics::STATE_DIM; i++)\n  {\n    EXPECT_FLOAT_EQ(state_der_cpu(i), state_der_gpu[i]);\n  }\n\n  CP_host->freeCudaMem();\n  delete (CP_host);\n}\n\nclass CartpoleDummy : public CartpoleDynamics\n{\npublic:\n  CartpoleDummy(float cartMass, float poleMass, float poleLength, cudaStream_t stream = 0)\n    : CartpoleDynamics(cartMass, poleMass, poleLength, stream){};\n  bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B)\n  {\n    return false;\n  };\n};\n\nTEST(CartPole, TestComputeGradComputation)\n{\n  Eigen::Matrix<float, CartpoleDynamics::STATE_DIM, CartpoleDynamics::STATE_DIM + CartpoleDynamics::CONTROL_DIM>\n      numeric_jac;\n  Eigen::Matrix<float, CartpoleDynamics::STATE_DIM, CartpoleDynamics::STATE_DIM + CartpoleDynamics::CONTROL_DIM>\n      analytic_jac;\n  CartpoleDynamics::state_array state;\n  state << 1, 2, 3, 4;\n  CartpoleDynamics::control_array control;\n  control << 5;\n\n  auto analytic_grad_model = CartpoleDynamics(1, 1, 1);\n\n  CartpoleDynamics::dfdx A_analytic = CartpoleDynamics::dfdx::Zero();\n  CartpoleDynamics::dfdu B_analytic = CartpoleDynamics::dfdu::Zero();\n\n  analytic_grad_model.computeGrad(state, control, A_analytic, B_analytic);\n\n  auto numerical_grad_model = CartpoleDummy(1, 1, 1);\n\n  std::shared_ptr<ModelWrapperDDP<CartpoleDummy>> ddp_model =\n      std::make_shared<ModelWrapperDDP<CartpoleDummy>>(&numerical_grad_model);\n\n  analytic_jac.leftCols<CartpoleDynamics::STATE_DIM>() = 
A_analytic;\n  analytic_jac.rightCols<CartpoleDynamics::CONTROL_DIM>() = B_analytic;\n  numeric_jac = ddp_model->df(state, control);\n\n  ASSERT_LT((numeric_jac - analytic_jac).norm(), 1e-3) << \"Numeric Jacobian\\n\"\n                                                       << numeric_jac << \"\\nAnalytic Jacobian\\n\"\n                                                       << analytic_jac;\n}\n"
  },
  {
    "path": "tests/dynamics/dubins_dynamics_tests.cu",
    "content": "#include <Eigen/Dense>\n#include <gtest/gtest.h>\n#include <mppi/dynamics/dubins/dubins.cuh>\n#include <kernel_tests/dynamics/dynamics_generic_kernel_tests.cuh>\n#include <mppi/ddp/ddp_model_wrapper.h>\n#include <cuda_runtime.h>\n\nTEST(DubinsDynamics, Template)\n{\n  auto dynamics = DubinsDynamics();\n  EXPECT_EQ(3, DubinsDynamics::STATE_DIM);\n  EXPECT_EQ(2, DubinsDynamics::CONTROL_DIM);\n}\n\nTEST(DubinsDynamics, BindStream)\n{\n  cudaStream_t stream;\n\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  auto dynamics = DubinsDynamics(stream);\n\n  EXPECT_EQ(dynamics.stream_, stream) << \"Stream binding failure.\";\n\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n}\n\nTEST(DubinsDynamics, ComputeDynamics)\n{\n  DubinsDynamics dynamics = DubinsDynamics();\n  DubinsDynamics::state_array x;\n  x << 0, 0, 0;\n  DubinsDynamics::control_array u;\n  u << 0, 0;\n\n  DubinsDynamics::state_array next_x;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 0);\n  EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_FLOAT_EQ(next_x(2), 0);\n\n  x << 1, 2, 0;\n  u << 1, 0;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 1);\n  EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_FLOAT_EQ(next_x(2), 0);\n\n  x << 1, 2, 0;\n  u << 3, 1;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 3);\n  EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_FLOAT_EQ(next_x(2), 1);\n\n  x << 1, 2, M_PI_2;\n  u << 4, 1;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_NEAR(next_x(0), 0.0, 1e-5);\n  EXPECT_FLOAT_EQ(next_x(1), 4 * sin(M_PI_2));\n  EXPECT_FLOAT_EQ(next_x(2), 1);\n\n  // TODO test case for flipping across angle discontinuity\n  x << 1, 2, M_PI_2;\n  u << 4, 1;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_NEAR(next_x(0), 0.0, 1e-5);\n  EXPECT_FLOAT_EQ(next_x(1), 4 * sin(M_PI_2));\n  EXPECT_FLOAT_EQ(next_x(2), 1);\n}\n\nTEST(DubinsDynamics, TestDynamicsGPU)\n{\n  DubinsDynamics dynamics = DubinsDynamics();\n  
dynamics.GPUSetup();\n\n  DubinsDynamics::state_array state;\n  state(0) = 0.5;\n  state(1) = 0.7;\n  state(2) = M_PI_4;\n  DubinsDynamics::control_array control;\n  control(0) = 3.0;\n  control(1) = 2.0;\n\n  std::vector<std::array<float, 3>> s(1);\n  for (int dim = 0; dim < 3; dim++)\n  {\n    s[0][dim] = state(dim);\n  }\n  std::vector<std::array<float, 3>> s_der(1);\n  // steering, throttle\n  std::vector<std::array<float, 2>> u(1);\n  for (int dim = 0; dim < 2; dim++)\n  {\n    u[0][dim] = control(dim);\n  }\n\n  // These variables will be changed so initialized to the right size only\n  DubinsDynamics::state_array state_der_cpu = DubinsDynamics::state_array::Zero();\n\n  // Run dynamics on dynamicsU\n  dynamics.computeDynamics(state, control, state_der_cpu);\n  // Run dynamics on GPU\n  for (int y_dim = 1; y_dim <= 3; y_dim++)\n  {\n    launchComputeDynamicsTestKernel<DubinsDynamics, 3, 2>(dynamics, s, u, s_der, y_dim);\n    for (int dim = 0; dim < 3; dim++)\n    {\n      EXPECT_FLOAT_EQ(state_der_cpu(dim), s_der[0][dim]);\n    }\n  }\n\n  dynamics.freeCudaMem();\n}\n\nTEST(DubinsDynamics, TestUpdateStateGPU)\n{\n  DubinsDynamics dynamics = DubinsDynamics();\n  dynamics.GPUSetup();\n\n  DubinsDynamics::state_array state;\n  state(0) = 0.5;\n  state(1) = 0.7;\n  state(2) = M_PI;\n  DubinsDynamics::control_array control;\n  control(0) = 3.0;\n  control(1) = 2.0;\n\n  std::vector<std::array<float, 3>> s(1);\n  for (int dim = 0; dim < 3; dim++)\n  {\n    s[0][dim] = state(dim);\n  }\n  std::vector<std::array<float, 3>> s_der(1);\n  // steering, throttle\n  std::vector<std::array<float, 2>> u(1);\n  for (int dim = 0; dim < 2; dim++)\n  {\n    u[0][dim] = control(dim);\n  }\n\n  // These variables will be changed so initialized to the right size only\n  DubinsDynamics::state_array state_der_cpu = DubinsDynamics::state_array::Zero();\n\n  // Run dynamics on dynamicsU\n  dynamics.computeDynamics(state, control, state_der_cpu);\n  dynamics.updateState(state, 
state_der_cpu, 0.1f);\n  // Run dynamics on GPU\n  for (int y_dim = 1; y_dim <= 3; y_dim++)\n  {\n    launchComputeStateDerivTestKernel<DubinsDynamics, 3, 2>(dynamics, s, u, s_der, y_dim);\n    launchUpdateStateTestKernel<DubinsDynamics, 3>(dynamics, s, s_der, 0.1f, y_dim);\n    for (int dim = 0; dim < 3; dim++)\n    {\n      EXPECT_FLOAT_EQ(state_der_cpu(dim), s_der[0][dim]);\n    }\n  }\n  dynamics.freeCudaMem();\n}\n\nclass DubinsDummy : public DubinsDynamics\n{\npublic:\n  bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B)\n  {\n    return false;\n  };\n};\n\nTEST(DubinsDynamics, TestComputeGradComputation)\n{\n  Eigen::Matrix<float, DubinsDynamics::STATE_DIM, DubinsDynamics::STATE_DIM + DubinsDynamics::CONTROL_DIM> numeric_jac;\n  Eigen::Matrix<float, DubinsDynamics::STATE_DIM, DubinsDynamics::STATE_DIM + DubinsDynamics::CONTROL_DIM> analytic_jac;\n  DubinsDynamics::state_array state;\n  state << 1, 2, 3;\n  DubinsDynamics::control_array control;\n  control << 5;\n\n  auto analytic_grad_model = DubinsDynamics();\n\n  DubinsDynamics::dfdx A_analytic = DubinsDynamics::dfdx::Zero();\n  DubinsDynamics::dfdu B_analytic = DubinsDynamics::dfdu::Zero();\n\n  analytic_grad_model.computeGrad(state, control, A_analytic, B_analytic);\n\n  auto numerical_grad_model = DubinsDummy();\n\n  std::shared_ptr<ModelWrapperDDP<DubinsDummy>> ddp_model =\n      std::make_shared<ModelWrapperDDP<DubinsDummy>>(&numerical_grad_model);\n\n  analytic_jac.leftCols<DubinsDynamics::STATE_DIM>() = A_analytic;\n  analytic_jac.rightCols<DubinsDynamics::CONTROL_DIM>() = B_analytic;\n  numeric_jac = ddp_model->df(state, control);\n\n  ASSERT_LT((numeric_jac - analytic_jac).norm(), 1e-3) << \"Numeric Jacobian\\n\"\n                                                       << numeric_jac << \"\\nAnalytic Jacobian\\n\"\n                                                       << 
analytic_jac;\n}\n"
  },
  {
    "path": "tests/dynamics/dynamics_generic_tests.cu",
    "content": "#include <Eigen/Dense>\n#include <gtest/gtest.h>\n#include <cuda_runtime.h>\n#include <mppi/dynamics/dynamics.cuh>\n\n#include <kernel_tests/dynamics/dynamics_generic_kernel_tests.cuh>\n\ntemplate <int STATE_DIM = 1, int CONTROL_DIM = 1, int OUTPUT_DIM = 1>\nstruct DynamicsTesterParams : public DynamicsParams\n{\n  enum class StateIndex : int\n  {\n    NUM_STATES = STATE_DIM\n  };\n  enum class ControlIndex : int\n  {\n    NUM_CONTROLS = CONTROL_DIM\n  };\n  enum class OutputIndex : int\n  {\n    NUM_OUTPUTS = OUTPUT_DIM\n  };\n  int var_1 = 1;\n  int var_2 = 2;\n  float4 var_4;\n};\n\ntemplate <int STATE_DIM = 1, int CONTROL_DIM = 1>\nclass DynamicsTester\n  : public MPPI_internal::Dynamics<DynamicsTester<STATE_DIM, CONTROL_DIM>, DynamicsTesterParams<STATE_DIM, CONTROL_DIM>>\n{\npublic:\n  typedef MPPI_internal::Dynamics<DynamicsTester<STATE_DIM, CONTROL_DIM>, DynamicsTesterParams<STATE_DIM, CONTROL_DIM>>\n      PARENT_CLASS;\n\n  using state_array = typename PARENT_CLASS::state_array;\n  using control_array = typename PARENT_CLASS::control_array;\n\n  DynamicsTester(cudaStream_t stream = 0) : PARENT_CLASS(stream)\n  {\n  }\n\n  DynamicsTester(DynamicsTesterParams<STATE_DIM, CONTROL_DIM>& params, cudaStream_t stream = 0)\n    : PARENT_CLASS(params, stream)\n  {\n  }\n\n  DynamicsTester(std::array<float2, CONTROL_DIM> control_rngs, cudaStream_t stream = 0)\n    : PARENT_CLASS(control_rngs, stream)\n  {\n  }\n\n  void computeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                       Eigen::Ref<state_array> state_der)\n  {\n    state_der(1) = control(0);\n  }\n\n  void computeKinematics(const Eigen::Ref<const state_array>& state, Eigen::Ref<state_array> s_der)\n  {\n    s_der(0) = state(0) + state(1);\n  };\n\n  // TODO must be properly parallelized\n  __device__ void computeDynamics(float* state, float* control, float* state_der, float* theta_s = nullptr)\n  {\n    state_der[1] = 
control[0];\n  }\n\n  // TODO must be properly parallelized\n  __device__ void computeKinematics(float* state, float* state_der)\n  {\n    state_der[0] = state[0] + state[1];\n  }\n\n  state_array stateFromMap(const std::map<std::string, float>& map)\n  {\n    return state_array::Zero();\n  }\n};\n\nTEST(Dynamics, BindStream)\n{\n  cudaStream_t stream;\n\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  auto tester = DynamicsTester<>(stream);\n\n  EXPECT_EQ(tester.stream_, stream) << \"Stream binding failure.\";\n\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  std::array<float2, 1> tester_ranges{};\n  tester = DynamicsTester<>(tester_ranges, stream);\n\n  EXPECT_EQ(tester.stream_, stream) << \"Stream binding failure.\";\n\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n}\n\nTEST(Dynamics, GPUSetupAndCudaFree)\n{\n  DynamicsTester<> tester;\n  EXPECT_EQ(tester.model_d_, nullptr);\n  EXPECT_EQ(tester.GPUMemStatus_, false);\n\n  tester.GPUSetup();\n  EXPECT_NE(tester.model_d_, nullptr);\n  EXPECT_EQ(tester.GPUMemStatus_, true);\n\n  tester.freeCudaMem();\n  EXPECT_EQ(tester.model_d_, nullptr);\n  EXPECT_EQ(tester.GPUMemStatus_, false);\n}\n\nTEST(Dynamics, setParamsCPU)\n{\n  DynamicsTester<> tester;\n  DynamicsTesterParams<> params_result = tester.getParams();\n  EXPECT_EQ(params_result.var_1, 1);\n  EXPECT_EQ(params_result.var_2, 2);\n\n  DynamicsTesterParams<> params;\n  params.var_1 = 10;\n  params.var_2 = 20;\n  params.var_4.x = 1.5;\n  params.var_4.y = 2.5;\n  params.var_4.z = 3.5;\n  params.var_4.w = 4.5;\n\n  tester.setParams(params);\n  params_result = tester.getParams();\n\n  EXPECT_EQ(params_result.var_1, params.var_1);\n  EXPECT_EQ(params_result.var_2, params.var_2);\n  EXPECT_EQ(params_result.var_4.x, params.var_4.x);\n  EXPECT_EQ(params_result.var_4.y, params.var_4.y);\n  EXPECT_EQ(params_result.var_4.z, params.var_4.z);\n  EXPECT_EQ(params_result.var_4.w, params.var_4.w);\n\n  // Test params constructor\n  
params.var_4.z = 1.5;\n  DynamicsTester<> tester_2 = DynamicsTester<>(params);\n  params_result = tester_2.getParams();\n\n  EXPECT_EQ(params_result.var_1, params.var_1);\n  EXPECT_EQ(params_result.var_2, params.var_2);\n  EXPECT_EQ(params_result.var_4.x, params.var_4.x);\n  EXPECT_EQ(params_result.var_4.y, params.var_4.y);\n  EXPECT_EQ(params_result.var_4.z, params.var_4.z);\n  EXPECT_EQ(params_result.var_4.w, params.var_4.w);\n}\n\nTEST(Dynamics, setParamsGPU)\n{\n  DynamicsTester<> tester;\n  tester.GPUSetup();\n  DynamicsTesterParams<> params_result = tester.getParams();\n  EXPECT_EQ(params_result.var_1, 1);\n  EXPECT_EQ(params_result.var_2, 2);\n\n  DynamicsTesterParams<> params;\n  params.var_1 = 10;\n  params.var_2 = 20;\n  params.var_4.x = 1.5;\n  params.var_4.y = 2.5;\n  params.var_4.z = 3.5;\n  params.var_4.w = 4.5;\n\n  tester.setParams(params);\n  launchParameterTestKernel<DynamicsTester<>, DynamicsTesterParams<>>(tester, params_result);\n\n  EXPECT_EQ(params_result.var_1, params.var_1);\n  EXPECT_EQ(params_result.var_2, params.var_2);\n  EXPECT_EQ(params_result.var_4.x, params.var_4.x);\n  EXPECT_EQ(params_result.var_4.y, params.var_4.y);\n  EXPECT_EQ(params_result.var_4.z, params.var_4.z);\n  EXPECT_EQ(params_result.var_4.w, params.var_4.w);\n\n  // Test params constructor\n  params.var_4.z = 1.5;\n  DynamicsTester<> tester_2 = DynamicsTester<>(params);\n  tester_2.GPUSetup();\n  launchParameterTestKernel<DynamicsTester<>, DynamicsTesterParams<>>(tester_2, params_result);\n\n  EXPECT_EQ(params_result.var_1, params.var_1);\n  EXPECT_EQ(params_result.var_2, params.var_2);\n  EXPECT_EQ(params_result.var_4.x, params.var_4.x);\n  EXPECT_EQ(params_result.var_4.y, params.var_4.y);\n  EXPECT_EQ(params_result.var_4.z, params.var_4.z);\n  EXPECT_EQ(params_result.var_4.w, params.var_4.w);\n}\n\nTEST(Dynamics, ClassConstants)\n{\n  DynamicsTester<> tester;\n  EXPECT_EQ(tester.STATE_DIM, 1);\n  EXPECT_EQ(tester.CONTROL_DIM, 1);\n  
EXPECT_EQ(tester.getGrdSharedSizeBytes(), 0);\n  EXPECT_EQ(tester.getBlkSharedSizeBytes(), 0);\n\n  DynamicsTester<56, 65> tester_2;\n  int state_dim = DynamicsTester<56, 65>::STATE_DIM;\n  EXPECT_EQ(state_dim, 56);\n  int control_dim = DynamicsTester<56, 65>::CONTROL_DIM;\n  EXPECT_EQ(control_dim, 65);\n  EXPECT_EQ(tester_2.getGrdSharedSizeBytes(), 0);\n  EXPECT_EQ(tester_2.getBlkSharedSizeBytes(), 0);\n}\n\nTEST(Dynamics, SetControlRangesDefault)\n{\n  DynamicsTester<> tester;\n  auto ranges = tester.getControlRanges();\n  EXPECT_FLOAT_EQ(ranges[0].x, -FLT_MAX);\n  EXPECT_FLOAT_EQ(ranges[0].y, FLT_MAX);\n\n  DynamicsTester<4, 2> tester_2;\n  auto ranges_2 = tester.getControlRanges();\n  for (int i = 0; i < ranges_2.size(); i++)\n  {\n    EXPECT_FLOAT_EQ(ranges_2[i].x, -FLT_MAX);\n    EXPECT_FLOAT_EQ(ranges_2[i].y, FLT_MAX);\n  }\n\n  auto params3 = tester.getParams();\n  DynamicsTester<> tester_3 = DynamicsTester<>(params3);\n  auto ranges_3 = tester_3.getControlRanges();\n  EXPECT_FLOAT_EQ(ranges_3[0].x, -FLT_MAX);\n  EXPECT_FLOAT_EQ(ranges_3[0].y, FLT_MAX);\n\n  auto params4 = tester_2.getParams();\n  DynamicsTester<4, 2> tester_4 = DynamicsTester<4, 2>(params4);\n  auto ranges_4 = tester_4.getControlRanges();\n  for (int i = 0; i < ranges_4.size(); i++)\n  {\n    EXPECT_FLOAT_EQ(ranges_4[i].x, -FLT_MAX);\n    EXPECT_FLOAT_EQ(ranges_4[i].y, FLT_MAX);\n  }\n}\n\nTEST(Dynamics, SetControlRanges)\n{\n  std::array<float2, 1> tester_ranges{};\n  tester_ranges[0].x = -2;\n  tester_ranges[0].y = 5;\n  DynamicsTester<> tester(tester_ranges);\n  auto ranges = tester.getControlRanges();\n  EXPECT_FLOAT_EQ(ranges[0].x, -2);\n  EXPECT_FLOAT_EQ(ranges[0].y, 5);\n\n  tester_ranges[0].x = -5;\n  tester_ranges[0].y = 6;\n  tester.setControlRanges(tester_ranges);\n  ranges = tester.getControlRanges();\n  EXPECT_FLOAT_EQ(ranges[0].x, tester_ranges[0].x) << \"failed at index: \" << 0;\n  EXPECT_FLOAT_EQ(ranges[0].y, tester_ranges[0].y) << \"failed at index: \" << 0;\n\n  
std::array<float2, 2> tester_2_ranges{};\n  tester_ranges[0].x = -3;\n  tester_ranges[0].y = 8;\n  tester_ranges[1].x = -11;\n  tester_ranges[1].y = 23;\n  DynamicsTester<4, 2> tester_2(tester_2_ranges);\n  auto ranges_2 = tester_2.getControlRanges();\n  for (int i = 0; i < ranges_2.size(); i++)\n  {\n    EXPECT_FLOAT_EQ(ranges_2[i].x, tester_2_ranges[i].x) << \"failed at index: \" << i;\n    EXPECT_FLOAT_EQ(ranges_2[i].y, tester_2_ranges[i].y) << \"failed at index: \" << i;\n  }\n}\n\nTEST(Dynamics, SetControlRangesGPU)\n{\n  std::array<float2, 1> tester_ranges{};\n  tester_ranges[0].x = -2;\n  tester_ranges[0].y = 5;\n  DynamicsTester<> tester(tester_ranges);\n  tester.GPUSetup();\n  std::array<float2, 1> ranges_result = {};\n  launchControlRangesTestKernel<DynamicsTester<>, 1>(tester, ranges_result);\n  EXPECT_FLOAT_EQ(ranges_result[0].x, -2);\n  EXPECT_FLOAT_EQ(ranges_result[0].y, 5);\n\n  std::array<float2, 2> tester_2_ranges{};\n  tester_2_ranges[0].x = -5;\n  tester_2_ranges[0].y = 6;\n  tester_2_ranges[1].x = -10;\n  tester_2_ranges[1].y = 20;\n  DynamicsTester<4, 2> tester_2(tester_2_ranges);\n  tester_2.GPUSetup();\n  std::array<float2, 2> ranges_result_2 = {};\n  launchControlRangesTestKernel<DynamicsTester<4, 2>, 2>(tester_2, ranges_result_2);\n  for (int i = 0; i < ranges_result_2.size(); i++)\n  {\n    EXPECT_FLOAT_EQ(ranges_result_2[i].x, tester_2_ranges[i].x) << \"failed at index: \" << i;\n    EXPECT_FLOAT_EQ(ranges_result_2[i].y, tester_2_ranges[i].y) << \"failed at index: \" << i;\n  }\n}\n\nTEST(Dynamics, enforceConstraintsCPU)\n{\n  std::array<float2, 1> tester_ranges{};\n  tester_ranges[0].x = -2;\n  tester_ranges[0].y = 5;\n  DynamicsTester<> tester(tester_ranges);\n\n  DynamicsTester<>::state_array s(1, 1);\n  DynamicsTester<>::control_array u(1, 1);\n\n  u(0) = 100;\n  tester.enforceConstraints(s, u);\n  EXPECT_FLOAT_EQ(u(0), 5);\n\n  u(0) = -42178;\n  tester.enforceConstraints(s, u);\n  EXPECT_FLOAT_EQ(u(0), -2);\n\n  u(0) = 2;\n  
tester.enforceConstraints(s, u);\n  EXPECT_FLOAT_EQ(u(0), 2);\n\n  u(0) = -1.5;\n  tester.enforceConstraints(s, u);\n  EXPECT_FLOAT_EQ(u(0), -1.5);\n}\n\nTEST(Dynamics, enforceConstraintsGPU)\n{\n  std::array<float2, 3> tester_ranges{};\n  tester_ranges[0].x = -2;\n  tester_ranges[0].y = 5;\n  tester_ranges[1].x = -6;\n  tester_ranges[1].y = 8;\n  tester_ranges[2].x = -11;\n  tester_ranges[2].y = 16;\n  DynamicsTester<1, 3> tester(tester_ranges);\n  tester.GPUSetup();\n\n  std::vector<std::array<float, 1>> states(4);\n  std::vector<std::array<float, 3>> controls(4);\n\n  states[0][0] = 48;\n  states[1][0] = 4518;\n  states[2][0] = 451;\n  states[3][0] = 4516;\n\n  controls[0][0] = 48;\n  controls[0][1] = 48;\n  controls[0][2] = 48;\n  controls[1][0] = -51;\n  controls[1][1] = -51;\n  controls[1][2] = -51;\n  controls[2][0] = 2;\n  controls[2][1] = 2;\n  controls[2][2] = 2;\n  controls[3][0] = -1.5;\n  controls[3][1] = -1.5;\n  controls[3][2] = -1.5;\n\n  // try a bunch of different y dim\n  for (int j = 1; j < 6; j++)\n  {\n    states[0][0] = 48;\n    states[1][0] = 4518;\n    states[2][0] = 451;\n    states[3][0] = 4516;\n\n    controls[0][0] = 48;\n    controls[0][1] = 48;\n    controls[0][2] = 48;\n    controls[1][0] = -51;\n    controls[1][1] = -51;\n    controls[1][2] = -51;\n    controls[2][0] = 2;\n    controls[2][1] = 2;\n    controls[2][2] = 2;\n    controls[3][0] = -1.5;\n    controls[3][1] = -1.5;\n    controls[3][2] = -1.5;\n\n    launchEnforceConstraintTestKernel<DynamicsTester<1, 3>, 1, 3>(tester, states, controls, j);\n\n    EXPECT_FLOAT_EQ(controls[0][0], 5);\n    EXPECT_FLOAT_EQ(controls[0][1], 8);\n    EXPECT_FLOAT_EQ(controls[0][2], 16);\n    EXPECT_FLOAT_EQ(controls[1][0], -2);\n    EXPECT_FLOAT_EQ(controls[1][1], -6);\n    EXPECT_FLOAT_EQ(controls[1][2], -11);\n    EXPECT_FLOAT_EQ(controls[2][0], 2);\n    EXPECT_FLOAT_EQ(controls[2][1], 2);\n    EXPECT_FLOAT_EQ(controls[2][2], 2);\n    EXPECT_FLOAT_EQ(controls[3][0], -1.5);\n    
EXPECT_FLOAT_EQ(controls[3][1], -1.5);\n    EXPECT_FLOAT_EQ(controls[3][2], -1.5);\n\n    EXPECT_FLOAT_EQ(states[0][0], 48);\n    EXPECT_FLOAT_EQ(states[1][0], 4518);\n    EXPECT_FLOAT_EQ(states[2][0], 451);\n    EXPECT_FLOAT_EQ(states[3][0], 4516);\n  }\n}\n\nTEST(Dynamics, updateStateCPU)\n{\n  DynamicsTester<> tester;\n  DynamicsTester<>::state_array s;\n  DynamicsTester<>::state_array s_der;\n\n  s(0) = 5;\n  s_der(0) = 10;\n\n  tester.updateState(s, s_der, 0.1);\n\n  EXPECT_FLOAT_EQ(s(0), 6);\n  EXPECT_FLOAT_EQ(s_der(0), 10);\n}\n\nTEST(Dynamics, updateStateGPU)\n{\n  DynamicsTester<4, 1> tester;\n  std::vector<std::array<float, 4>> s(1);\n  std::vector<std::array<float, 4>> s_der(1);\n\n  s[0][0] = 0;\n  s[0][1] = 1;\n  s[0][2] = 2;\n  s[0][3] = 3;\n\n  s_der[0][0] = 0;\n  s_der[0][1] = 1;\n  s_der[0][2] = 2;\n  s_der[0][3] = 3;\n\n  for (int i = 1; i < 6; i++)\n  {\n    s[0][0] = 0;\n    s[0][1] = 1;\n    s[0][2] = 2;\n    s[0][3] = 3;\n\n    s_der[0][0] = 0;\n    s_der[0][1] = 1;\n    s_der[0][2] = 2;\n    s_der[0][3] = 3;\n\n    launchUpdateStateTestKernel<DynamicsTester<4, 1>, 4>(tester, s, s_der, 0.1, i);\n\n    EXPECT_FLOAT_EQ(s[0][0], 0);\n    EXPECT_FLOAT_EQ(s[0][1], 1.1);\n    EXPECT_FLOAT_EQ(s[0][2], 2.2);\n    EXPECT_FLOAT_EQ(s[0][3], 3.3);\n    EXPECT_FLOAT_EQ(s_der[0][0], 0);\n    EXPECT_FLOAT_EQ(s_der[0][1], 1);\n    EXPECT_FLOAT_EQ(s_der[0][2], 2);\n    EXPECT_FLOAT_EQ(s_der[0][3], 3);\n  }\n}\n\nTEST(Dynamics, computeStateDerivCPU)\n{\n  DynamicsTester<2, 1> tester;\n  DynamicsTester<2, 1>::state_array s;\n  DynamicsTester<2, 1>::state_array s_der;\n  DynamicsTester<2, 1>::control_array u;\n\n  s(0) = 5;\n  s(1) = 10;\n  s_der(0) = 10;\n  s_der(1) = 20;\n  u(0) = 3;\n\n  tester.computeStateDeriv(s, u, s_der);\n\n  EXPECT_FLOAT_EQ(s(0), 5);\n  EXPECT_FLOAT_EQ(s(1), 10);\n  EXPECT_FLOAT_EQ(s_der(0), 15);\n  EXPECT_FLOAT_EQ(s_der(1), 3);\n  EXPECT_FLOAT_EQ(u(0), 3);\n}\n\nTEST(Dynamics, computeStateDerivGPU)\n{\n  DynamicsTester<2, 1> tester;\n  
std::vector<std::array<float, 2>> s(1);\n  std::vector<std::array<float, 1>> u(1);\n  std::vector<std::array<float, 2>> s_der(1);\n\n  for (int j = 1; j < 6; j++)\n  {\n    s[0][0] = 5;\n    s[0][1] = 10;\n    s_der[0][0] = 10;\n    s_der[0][1] = 20;\n    u[0][0] = 3;\n\n    launchComputeStateDerivTestKernel<DynamicsTester<2, 1>, 2, 1>(tester, s, u, s_der, j);\n\n    EXPECT_FLOAT_EQ(s[0][0], 5) << \"j = \" << j;\n    EXPECT_FLOAT_EQ(s[0][1], 10) << \"j = \" << j;\n    EXPECT_FLOAT_EQ(s_der[0][0], 15) << \"j = \" << j;\n    EXPECT_FLOAT_EQ(s_der[0][1], 3) << \"j = \" << j;\n    EXPECT_FLOAT_EQ(u[0][0], 3) << \"j = \" << j;\n  }\n}\n\nTEST(Dynamics, stepGPU)\n{\n  DynamicsTester<2, 1> tester;\n  tester.GPUSetup();\n  std::vector<std::array<float, 2>> s(1);\n  std::vector<std::array<float, 1>> u(1);\n  std::vector<std::array<float, 2>> s_der(1);\n  std::vector<std::array<float, 2>> s_next(1);\n  int t = 0;\n  float dt = 0.5;\n\n  for (int dim_y = 1; dim_y < 6; dim_y++)\n  {\n    s[0][0] = 5;\n    s[0][1] = 10;\n    s_der[0][0] = 10;\n    s_der[0][1] = 20;\n    u[0][0] = 3;\n\n    launchStepTestKernel<DynamicsTester<2, 1>>(tester, s, u, s_der, s_next, t, dt, dim_y);\n\n    EXPECT_FLOAT_EQ(s[0][0], 5) << \"dim_y = \" << dim_y;\n    EXPECT_FLOAT_EQ(s[0][1], 10) << \"dim_y = \" << dim_y;\n    EXPECT_FLOAT_EQ(s_der[0][0], 15) << \"dim_y = \" << dim_y;\n    EXPECT_FLOAT_EQ(s_der[0][1], 3) << \"dim_y = \" << dim_y;\n    EXPECT_FLOAT_EQ(s_next[0][0], 5 + 7.5) << \"dim_y = \" << dim_y;\n    EXPECT_FLOAT_EQ(s_next[0][1], 10 + 1.5) << \"dim_y = \" << dim_y;\n    EXPECT_FLOAT_EQ(u[0][0], 3) << \"dim_y = \" << dim_y;\n  }\n}\n\nTEST(Dynamics, stepCPU)\n{\n  using DYN = DynamicsTester<2, 1>;\n  DYN tester;\n  DYN::state_array x;\n  DYN::state_array x_dot;\n  DYN::state_array x_next;\n  DYN::control_array u;\n  DYN::output_array output;\n\n  int t = 0;\n  float dt = 0.5;\n  x(0) = 5;\n  x(1) = 10;\n  x_dot(0) = 10;\n  x_dot(1) = 20;\n  u(0) = 3;\n\n  tester.step(x, x_next, x_dot, u, 
output, t, dt);\n\n  EXPECT_FLOAT_EQ(x(0), 5);\n  EXPECT_FLOAT_EQ(x(1), 10);\n  EXPECT_FLOAT_EQ(x_dot(0), 15);\n  EXPECT_FLOAT_EQ(x_dot(1), 3);\n  EXPECT_FLOAT_EQ(x_next(0), 5 + 7.5);\n  EXPECT_FLOAT_EQ(x_next(1), 10 + 1.5);\n  EXPECT_FLOAT_EQ(u(0), 3);\n}\n\nTEST(Dynamics, interpolateState)\n{\n  DynamicsTester<3, 1> tester;\n  DynamicsTester<3, 1>::state_array s1;\n  DynamicsTester<3, 1>::state_array s2;\n\n  s1(0) = 1.5;\n  s2(0) = 2.0;\n\n  s1(1) = -3.0;\n  s2(1) = -3.5;\n\n  s1(2) = -0.25;\n  s2(2) = 0.25;\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0)(0), 1.5);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0.25)(0), 1.625);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0.5)(0), 1.75);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0.75)(0), 1.875);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 1.0)(0), 2.0);\n\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0)(1), -3.0);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0.25)(1), -3.125);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0.5)(1), -3.25);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0.75)(1), -3.375);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 1.0)(1), -3.5);\n\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0)(2), -0.25);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0.25)(2), -0.125);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0.5)(2), 0);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 0.75)(2), 0.125);\n  EXPECT_FLOAT_EQ(tester.interpolateState(s1, s2, 1.0)(2), 0.25);\n}\n"
  },
  {
    "path": "tests/dynamics/linear_dynamics_tests.cu",
    "content": "#include <gtest/gtest.h>\n#include <kernel_tests/dynamics/dynamics_generic_kernel_tests.cuh>\n#include <mppi/dynamics/linear/linear.cuh>\n\nTEST(Linear, Dimensionality)\n{\n  using DYN_T = LinearDynamics<4, 12>;\n  ASSERT_EQ(4, DYN_T::STATE_DIM);\n  ASSERT_EQ(4, DYN_T::OUTPUT_DIM);\n  ASSERT_EQ(12, DYN_T::CONTROL_DIM);\n}\n\nTEST(Linear, SetParamsA)\n{\n  using DYN_T = LinearDynamics<4, 12>;\n  using DFDX = typename DYN_T::dfdx;\n  // Set A from params\n  DFDX A = DFDX::Random();\n  typename DYN_T::DYN_PARAMS_T params;\n  params.setA(A);\n  auto dynamics = DYN_T(params);\n  auto dyn_params = dynamics.getParams();\n  Eigen::Map<DFDX> A_params_result(dyn_params.A);\n  float diff = (A - A_params_result).sum();\n  EXPECT_FLOAT_EQ(diff, 0);\n  // Set A from dynamics\n  A = DFDX::Random();\n  dynamics.setA(A);\n  dyn_params = dynamics.getParams();\n  Eigen::Map<DFDX> A_class_result(dyn_params.A);\n  diff = (A - A_class_result).sum();\n  EXPECT_FLOAT_EQ(diff, 0);\n}\n\nTEST(Linear, SetParamsB)\n{\n  using DYN_T = LinearDynamics<4, 12>;\n  using DFDU = typename DYN_T::dfdu;\n  // Set B from params\n  DFDU B = DFDU::Random();\n  typename DYN_T::DYN_PARAMS_T params;\n  params.setB(B);\n  auto dynamics = DYN_T(params);\n  auto dyn_params = dynamics.getParams();\n  Eigen::Map<DFDU> B_result(dyn_params.B);\n  float diff = (B - B_result).sum();\n  EXPECT_FLOAT_EQ(diff, 0);\n  // Set A from dynamics\n  B = DFDU::Random();\n  dynamics.setB(B);\n  dyn_params = dynamics.getParams();\n  Eigen::Map<DFDU> B_class_result(dyn_params.B);\n  diff = (B - B_class_result).sum();\n  EXPECT_FLOAT_EQ(diff, 0);\n}\n\nTEST(Linear, CheckSharedMemorySizes)\n{\n  using DYN_T = LinearDynamics<4, 12>;\n  auto dynamics = DYN_T();\n  dynamics.GPUSetup();\n  int output_gpu[2] = { 0 };\n  int output_cpu[2] = { 0 };\n  launchGetSharedMemorySizesKernel<DYN_T>(dynamics, output_gpu);\n  output_cpu[0] = dynamics.getGrdSharedSizeBytes();\n  output_cpu[1] = dynamics.getBlkSharedSizeBytes();\n  
ASSERT_EQ(output_cpu[0], output_gpu[0]);\n  ASSERT_EQ(output_cpu[1], output_gpu[1]);\n}\n\nTEST(Linear, StepCPUGPUComparison)\n{\n  using DYN_T = LinearDynamics<10, 12>;\n  using DFDU = typename DYN_T::dfdu;\n  using DFDX = typename DYN_T::dfdx;\n  DFDU B = DFDU::Random();\n  DFDX A = DFDX::Random();\n  auto dynamics = DYN_T();\n  dynamics.setA(A);\n  dynamics.setB(B);\n  typename DYN_T::buffer_trajectory buffer;\n\n  std::vector<int> x_sizes = { 1, 2, 4, 8, 16, 32 };\n  for (const auto& x_dim : x_sizes)\n  {\n    checkGPUComputationStep<DYN_T>(dynamics, 0.01, 32, x_dim, buffer);\n  }\n}\n\nTEST(Linear, JacobianCheck)\n{\n  using DYN_T = LinearDynamics<10, 6>;\n  using DFDU = typename DYN_T::dfdu;\n  using DFDX = typename DYN_T::dfdx;\n  using state_array = typename DYN_T::state_array;\n  using control_array = typename DYN_T::control_array;\n  auto dynamics = DYN_T();\n  DFDX A = DFDX::Identity();\n  DFDU B = DFDU::Random();\n  dynamics.setA(A);\n  dynamics.setB(B);\n\n  DFDX Jacobian_A;\n  DFDU Jacobian_B;\n  state_array x = dynamics.getZeroState();\n  control_array u = dynamics.getZeroControl();\n  dynamics.computeGrad(x, u, Jacobian_A, Jacobian_B);\n  float a_diff = (Jacobian_A - A).array().abs().sum();\n  float b_diff = (Jacobian_B - B).array().abs().sum();\n  ASSERT_EQ(a_diff, 0);\n  ASSERT_EQ(b_diff, 0);\n}\n\nTEST(Linear, HardCodeCPUTest)\n{\n  using DYN_T = LinearDynamics<3, 1>;\n  using DFDU = typename DYN_T::dfdu;\n  using DFDX = typename DYN_T::dfdx;\n  using state_array = typename DYN_T::state_array;\n  using control_array = typename DYN_T::control_array;\n  using output_array = typename DYN_T::output_array;\n\n  auto dynamics = DYN_T();\n  DFDX A = DFDX::Identity();\n  DFDU B = DFDU::Zero();\n  B(2, 0) = 0.5f;\n  dynamics.setA(A);\n  dynamics.setB(B);\n\n  state_array x = dynamics.getZeroState();\n  control_array u = dynamics.getZeroControl();\n  x << 1, 5, 10;\n  u << 3;\n  float dt = 0.1;\n  output_array y;\n  state_array x_der, x_next;\n  
dynamics.step(x, x_next, x_der, u, y, 0, dt);\n  // Check derivative\n  ASSERT_FLOAT_EQ(x_der[0], 1.0f);\n  ASSERT_FLOAT_EQ(x_der[1], 5.0f);\n  ASSERT_FLOAT_EQ(x_der[2], 10.0f + 1.5f);\n\n  // Check next state\n  ASSERT_FLOAT_EQ(x_next[0], 1.0f + 1.0f * 0.1f);\n  ASSERT_FLOAT_EQ(x_next[1], 5.0f + 5.0f * 0.1f);\n  ASSERT_FLOAT_EQ(x_next[2], 10.0f + (10.0f + 1.5f) * 0.1f);\n\n  // Check output\n  ASSERT_FLOAT_EQ(y[0], 1.0f + 1.0f * 0.1f);\n  ASSERT_FLOAT_EQ(y[1], 5.0f + 5.0f * 0.1f);\n  ASSERT_FLOAT_EQ(y[2], 10.0f + (10.0f + 1.5f) * 0.1f);\n}\n"
  },
  {
    "path": "tests/dynamics/quadrotor_dynamics_tests.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/dynamics/quadrotor/quadrotor_dynamics.cuh>\n#include <mppi/utils/test_helper.h>\n\ntemplate <class DYN_T>\nvoid __global__ DynamicsDerivKernel(DYN_T* model, float* state_d, float* u_d, float* state_deriv_d)\n{\n  model->computeDynamics(state_d, u_d, state_deriv_d);\n}\n\ntemplate <class DYN_T>\nvoid __global__ UpdateStateKernel(DYN_T* model, float* state_d, float* u_d, float* state_deriv_d, float dt)\n{\n  model->computeDynamics(state_d, u_d, state_deriv_d);\n  model->updateState(state_d, state_deriv_d, dt);\n}\n\nTEST(QuadrotorDynamics, Constructor)\n{\n  QuadrotorDynamics();\n}\n\nTEST(QuadrotorDynamics, CompareDynamics_CPU_GPU)\n{\n  using DYN = QuadrotorDynamics;\n  DYN model;\n\n  DYN::state_array s = DYN::state_array::Random();\n  DYN::control_array u = DYN::control_array::Random();\n  DYN::state_array state_deriv_cpu, state_deriv_gpu;\n\n  Eigen::Quaternionf q_test(s[6], s[7], s[8], s[9]);\n  q_test.normalize();\n  s[6] = q_test.w();\n  s[7] = q_test.x();\n  s[8] = q_test.y();\n  s[9] = q_test.z();\n\n  /**\n   * GPU Setup\n   */\n  model.GPUSetup();\n  cudaStream_t s1;\n  cudaStreamCreate(&s1);\n  float* s_d;\n  float* u_d;\n  float* state_deriv_GPU;\n  // Allocate GPU Memory\n  // size_t control_size = sizeof(float) * DYN::CONTROL_DIM;\n  HANDLE_ERROR(cudaMalloc((void**)&u_d, sizeof(float) * DYN::CONTROL_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&s_d, sizeof(float) * DYN::STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&state_deriv_GPU, sizeof(float) * DYN::STATE_DIM));\n\n  // Copy data to GPU\n  HANDLE_ERROR(cudaMemcpyAsync(u_d, u.data(), sizeof(float) * DYN::CONTROL_DIM, cudaMemcpyHostToDevice, s1));\n  HANDLE_ERROR(cudaMemcpyAsync(s_d, s.data(), sizeof(float) * DYN::STATE_DIM, cudaMemcpyHostToDevice, s1));\n  HANDLE_ERROR(cudaStreamSynchronize(s1));\n\n  model.computeDynamics(s, u, state_deriv_cpu);\n\n  // Call GPU Kernel\n  dim3 dimBlock(1, 5, 1);\n  dim3 dimGrid(1, 1, 1);\n  
DynamicsDerivKernel<DYN><<<dimGrid, dimBlock, 0, s1>>>(model.model_d_, s_d, u_d, state_deriv_GPU);\n  CudaCheckError();\n  HANDLE_ERROR(cudaStreamSynchronize(s1));\n  // Copy GPU results back to Host\n  HANDLE_ERROR(cudaMemcpyAsync(state_deriv_gpu.data(), state_deriv_GPU, sizeof(float) * DYN::STATE_DIM,\n                               cudaMemcpyDeviceToHost, s1));\n  HANDLE_ERROR(cudaStreamSynchronize(s1));\n  eigen_assert_float_eq<DYN::state_array>(state_deriv_cpu, state_deriv_gpu);\n}\n\nTEST(QuadrotorDynamics, UpdateState_CPU_GPU)\n{\n  using DYN = QuadrotorDynamics;\n  DYN model;\n  QuadrotorDynamicsParams params = QuadrotorDynamicsParams(2.5);\n\n  DYN::state_array s_cpu = DYN::state_array::Random();\n  DYN::control_array u = DYN::control_array::Random();\n  DYN::state_array s_gpu;\n  DYN::state_array state_deriv_cpu, state_deriv_gpu;\n  float dt = 0.01;\n\n  Eigen::Quaternionf q_test(s_cpu[6], s_cpu[7], s_cpu[8], s_cpu[9]);\n  q_test.normalize();\n  s_cpu[6] = q_test.w();\n  s_cpu[7] = q_test.x();\n  s_cpu[8] = q_test.y();\n  s_cpu[9] = q_test.z();\n\n  s_gpu = s_cpu;\n\n  /**\n   * GPU Setup\n   */\n  model.GPUSetup();\n  model.setParams(params);\n  cudaStream_t s1;\n  cudaStreamCreate(&s1);\n  float* s_d;\n  float* u_d;\n  float* state_deriv_GPU;\n  // Allocate GPU Memory\n  HANDLE_ERROR(cudaMalloc((void**)&u_d, sizeof(float) * DYN::CONTROL_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&s_d, sizeof(float) * DYN::STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&state_deriv_GPU, sizeof(float) * DYN::STATE_DIM));\n\n  // Copy data to GPU\n  HANDLE_ERROR(cudaMemcpyAsync(u_d, u.data(), sizeof(float) * DYN::CONTROL_DIM, cudaMemcpyHostToDevice, s1));\n  HANDLE_ERROR(cudaMemcpyAsync(s_d, s_gpu.data(), sizeof(float) * DYN::STATE_DIM, cudaMemcpyHostToDevice, s1));\n  HANDLE_ERROR(cudaStreamSynchronize(s1));\n\n  model.computeDynamics(s_cpu, u, state_deriv_cpu);\n  model.updateState(s_cpu, state_deriv_cpu, dt);\n\n  // Call GPU Kernel\n  dim3 dimBlock(1, 5, 1);\n  dim3 
dimGrid(1, 1, 1);\n  UpdateStateKernel<DYN><<<dimGrid, dimBlock, 0, s1>>>(model.model_d_, s_d, u_d, state_deriv_GPU, dt);\n  CudaCheckError();\n  HANDLE_ERROR(cudaStreamSynchronize(s1));\n  // Copy GPU results back to Host\n  HANDLE_ERROR(cudaMemcpyAsync(s_gpu.data(), s_d, sizeof(float) * DYN::STATE_DIM, cudaMemcpyDeviceToHost, s1));\n  HANDLE_ERROR(cudaMemcpyAsync(state_deriv_gpu.data(), state_deriv_GPU, sizeof(float) * DYN::STATE_DIM,\n                               cudaMemcpyDeviceToHost, s1));\n  HANDLE_ERROR(cudaStreamSynchronize(s1));\n  eigen_assert_float_eq<DYN::state_array>(s_cpu, s_gpu);\n}"
  },
  {
    "path": "tests/dynamics/racer_dubins_elevation_lstm_steering_model_test.cu",
    "content": "#include <Eigen/Dense>\n#include <gtest/gtest.h>\n#include <mppi/dynamics/racer_dubins/racer_dubins_elevation_lstm_steering.cuh>\n#include <kernel_tests/dynamics/dynamics_generic_kernel_tests.cuh>\n#include <mppi/ddp/ddp_model_wrapper.h>\n#include <racer_test_networks.h>\n#include <cuda_runtime.h>\n\nclass RacerDubinsElevationLSTMSteeringTest : public ::testing::Test\n{\npublic:\n  cudaStream_t stream;\n\n  void SetUp() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n  }\n\n  void TearDown() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamDestroy(stream));\n  }\n\n  std::vector<int> init_output_layers = { 23, 100, 8 };\n  std::vector<int> output_layers = { 8, 20, 1 };\n};\n\nTEST_F(RacerDubinsElevationLSTMSteeringTest, Template)\n{\n  auto dynamics = RacerDubinsElevationLSTMSteering(3, 20, init_output_layers, 4, 4, output_layers, 11);\n  EXPECT_EQ(19, RacerDubinsElevationLSTMSteering::STATE_DIM);\n  EXPECT_EQ(2, RacerDubinsElevationLSTMSteering::CONTROL_DIM);\n  EXPECT_TRUE(dynamics.checkRequiresBuffer());\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n  EXPECT_EQ(dynamics.getBlkSharedSizeBytes(), 384);\n  EXPECT_EQ(dynamics.getGrdSharedSizeBytes(), 1408);\n}\n\nTEST_F(RacerDubinsElevationLSTMSteeringTest, BindStream)\n{\n  auto dynamics = RacerDubinsElevationLSTMSteering(3, 20, init_output_layers, 4, 4, output_layers, 11);\n  dynamics.bindToStream(stream);\n\n  EXPECT_EQ(dynamics.stream_, stream) << \"Stream binding failure.\";\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n  EXPECT_EQ(dynamics.getTextureHelper()->stream_, stream);\n  EXPECT_NE(dynamics.getHelper(), nullptr);\n  EXPECT_EQ(dynamics.getHelper()->getLSTMModel()->stream_, stream);\n\n  auto dynamics2 = RacerDubinsElevationLSTMSteering(3, 20, init_output_layers, 4, 4, output_layers, 11, stream);\n\n  EXPECT_EQ(dynamics2.stream_, stream) << \"Stream binding failure.\";\n  EXPECT_NE(dynamics2.getTextureHelper(), nullptr);\n  
EXPECT_EQ(dynamics2.getTextureHelper()->stream_, stream);\n  EXPECT_NE(dynamics2.getHelper(), nullptr);\n  EXPECT_EQ(dynamics2.getHelper()->getLSTMModel()->stream_, stream);\n}\n\n/*\nfloat c_t = 1.3;\nfloat c_b = 2.5;\nfloat c_v = 3.7;\nfloat c_0 = 4.9;\nfloat wheel_base = 0.3;\n */\n\n// TEST_F(RacerDubinsElevationLSTMSteeringTest, ComputeDynamics)\n// {\n//   RacerDubinsElevationLSTMSteering dynamics = RacerDubinsElevationLSTMSteering();\n//   auto params = dynamics.getParams();\n//   RacerDubinsElevationLSTMSteering::state_array x = RacerDubinsElevationLSTMSteering::state_array::Zero();\n//   RacerDubinsElevationLSTMSteering::control_array u = RacerDubinsElevationLSTMSteering::control_array::Zero();\n\n//   // computeDynamics should not touch the roll/pitch element\n//   RacerDubinsElevationLSTMSteering::state_array next_x = RacerDubinsElevationLSTMSteering::state_array::Ones() *\n//   0.153; dynamics.computeDynamics(x, u, next_x); EXPECT_FLOAT_EQ(next_x(0), 4.9); EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), 0);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 1, M_PI_2, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << 1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 2.6 - 4.7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), 1);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 1, M_PI_2, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 3.5 - 4.7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), 1);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   
EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -1, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << 1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 4.7 + 2.6 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), -1);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -1, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 3.5 + 4.7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), -1);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 7, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << 1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 3.9 - 5.7 * 7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), 7);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -7, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << 1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 3.9 + 5.7 * 7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), -7);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 7, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 4.5 - 5.7 * 7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   
EXPECT_FLOAT_EQ(next_x(2), 7);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -7, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 4.5 + 5.7 * 7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), -7);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 1, M_PI_2, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << 0, 1;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 4.7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), (1 / .3) * tan(0));\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), 1);\n//   EXPECT_FLOAT_EQ(next_x(4), 5 * 0.6);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -1, M_PI_2, 0, 3, 5.0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, -1;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 4.7 + 3.5 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), (-1 / .3) * tan(5.0 / -10.2));\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), -1);\n//   EXPECT_FLOAT_EQ(next_x(4), -5);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -0.4, M_PI_2, 0, 3, 5.0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, -1;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 3.7 * 0.4 + 2.5 * 0.4 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), (-0.4 / .3) * tan(5.0 / -9.1));\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), -0.4);\n//   EXPECT_FLOAT_EQ(next_x(4), -5);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 0.4, M_PI_2, 
0, 3, 5.0, 0.5, -0.5, 0.0, 0.0;\n//   u << 0.1, -1;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 3.7 * 0.4 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), (0.4 / .3) * tan(5.0 / -9.1));\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), 0.4);\n//   EXPECT_FLOAT_EQ(next_x(4), -5);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n// }\n\n// TEST_F(RacerDubinsElevationLSTMSteeringTest, TestModelGPU)\n// {\n//   RacerDubinsElevationLSTMSteering dynamics = RacerDubinsElevationLSTMSteering();\n//   dynamics.GPUSetup();\n//   CudaCheckError();\n\n//   Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::CONTROL_DIM, 100> control_trajectory;\n//   control_trajectory = Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::CONTROL_DIM, 100>::Random();\n//   Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::STATE_DIM, 100> state_trajectory;\n//   state_trajectory = Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::STATE_DIM, 100>::Random();\n\n//   std::vector<std::array<float, 9>> s(100);\n//   std::vector<std::array<float, 9>> s_der(100);\n//   // steering, throttle\n//   std::vector<std::array<float, 2>> u(100);\n//   for (int state_index = 0; state_index < s.size(); state_index++)\n//   {\n//     for (int dim = 0; dim < s[0].size(); dim++)\n//     {\n//       s[state_index][dim] = state_trajectory.col(state_index)(dim);\n//     }\n//     for (int dim = 0; dim < u[0].size(); dim++)\n//     {\n//       u[state_index][dim] = control_trajectory.col(state_index)(dim);\n//     }\n//   }\n\n//   // These variables will be changed so initialized to the right size only\n\n//   // Run dynamics on dynamicsU\n//   // Run dynamics on GPU\n//   for (int y_dim = 1; y_dim <= 4; y_dim++)\n//   {\n//     launchComputeDynamicsTestKernel<RacerDubinsElevationLSTMSteering, 9, 2>(dynamics, s, u, s_der, y_dim);\n//     for (int point = 0; point < 100; point++)\n//     {\n//       
RacerDubinsElevationLSTMSteering::state_array state = state_trajectory.col(point);\n//       RacerDubinsElevationLSTMSteering::control_array control = control_trajectory.col(point);\n//       RacerDubinsElevationLSTMSteering::state_array state_der_cpu =\n//       RacerDubinsElevationLSTMSteering::state_array::Zero();\n\n//       dynamics.computeDynamics(state, control, state_der_cpu);\n//       for (int dim = 0; dim < 6; dim++)\n//       {\n//         EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-5)\n//             << \"at point \" << point << \" dim \" << dim << \" with y_dim \" << y_dim;\n//         EXPECT_TRUE(isfinite(s_der[point][dim]));\n//       }\n//     }\n//   }\n\n//   dynamics.freeCudaMem();\n//   CudaCheckError();\n// }\n\n// TEST_F(RacerDubinsElevationLSTMSteeringTest, TestUpdateState)\n// {\n//   CudaCheckError();\n//   RacerDubinsElevationLSTMSteering dynamics = RacerDubinsElevationLSTMSteering();\n//   RacerDubinsElevationLSTMSteering::state_array state;\n//   RacerDubinsElevationLSTMSteering::state_array next_state;\n//   RacerDubinsElevationLSTMSteering::state_array state_der;\n\n//   // TODO add in the elevation map\n\n//   state << 0, 0, 0, 0, 0, -0.5, 0.5;\n//   state_der << 1, 1, 1, 1, 1, 0, 0;\n//   dynamics.updateState(state, next_state, state_der, 0.1);\n//   EXPECT_TRUE(state_der != RacerDubinsElevationLSTMSteering::state_array::Zero());\n//   EXPECT_FLOAT_EQ(next_state(0), 0.1);\n//   EXPECT_FLOAT_EQ(next_state(1), 0.1);\n//   EXPECT_FLOAT_EQ(next_state(2), 0.1);\n//   EXPECT_FLOAT_EQ(next_state(3), 0.1);\n//   EXPECT_FLOAT_EQ(next_state(4), 0.1);\n//   EXPECT_FLOAT_EQ(next_state(5), 0.0);\n//   EXPECT_FLOAT_EQ(next_state(6), 0.0);\n\n//   state << 0, M_PI - 0.1, 0, 0, 0, -0.5, 0.5;\n//   state_der << 1, 1, 1, 1, 1;\n//   dynamics.updateState(state, next_state, state_der, 1.0);\n//   EXPECT_TRUE(state_der != RacerDubinsElevationLSTMSteering::state_array::Zero());\n//   EXPECT_FLOAT_EQ(next_state(0), 1.0);\n//   
EXPECT_FLOAT_EQ(next_state(1), 1.0 - M_PI - 0.1);\n//   EXPECT_FLOAT_EQ(next_state(2), 1.0);\n//   EXPECT_FLOAT_EQ(next_state(3), 1.0);\n//   EXPECT_FLOAT_EQ(next_state(4), 0.5);\n//   EXPECT_FLOAT_EQ(next_state(5), 0.0);\n//   EXPECT_FLOAT_EQ(next_state(6), 0.0);\n\n//   state << 0, -M_PI + 0.1, 0, 0, 0, -0.5, 0.5;\n//   state_der << 1, -1, 1, 1, 1;\n//   dynamics.updateState(state, next_state, state_der, 1.0);\n//   EXPECT_TRUE(state_der != RacerDubinsElevationLSTMSteering::state_array::Zero());\n//   EXPECT_FLOAT_EQ(next_state(0), 1.0);\n//   EXPECT_FLOAT_EQ(next_state(1), M_PI + 0.1 - 1.0);\n//   EXPECT_FLOAT_EQ(next_state(2), 1.0);\n//   EXPECT_FLOAT_EQ(next_state(3), 1.0);\n//   EXPECT_FLOAT_EQ(next_state(4), 0.5);\n//   EXPECT_FLOAT_EQ(next_state(5), 0.0);\n//   EXPECT_FLOAT_EQ(next_state(6), 0.0);\n\n//   CudaCheckError();\n// }\n\nTEST_F(RacerDubinsElevationLSTMSteeringTest, TestStep)\n{\n  GTEST_SKIP() << \"Skipping test since they have not been updated to accel \";\n  CudaCheckError();\n  using DYN = RacerDubinsElevationLSTMSteering;\n  const float tol = 1e-6;\n  auto dynamics = RacerDubinsElevationLSTMSteering(3, 20, init_output_layers, 4, 4, output_layers, 11);\n  auto params = dynamics.getParams();\n  params.c_0 = 0;\n  params.c_b[0] = 1;\n  params.c_b[1] = 10;\n  params.c_b[2] = 100;\n  params.c_v[0] = 0.25;\n  params.c_v[1] = 0.5;\n  params.c_v[2] = 0.75;\n  params.c_t[0] = 2;\n  params.c_t[1] = 20;\n  params.c_t[2] = 200;\n  params.low_min_throttle = 0.2;\n  params.steer_command_angle_scale = 0.5;\n  params.steering_constant = 0.5;\n  params.wheel_base = 0.5;\n  params.max_steer_rate = 5;\n  params.max_steer_angle = 5;\n  dynamics.setParams(params);\n  DYN::state_array state;\n  DYN::state_array next_state;\n  DYN::state_array state_der = DYN::state_array::Zero();\n  DYN::control_array control;\n  DYN::output_array output;\n  float dt = 0.1;\n  // TODO add in the elevation map\n\n  auto model = dynamics.getHelper();\n  
model->getLSTMModel()->getOutputModel()->setAllWeights(0.1f);\n  model->getInitModel()->getOutputModel()->setAllWeights(0.01f);\n  model->getLSTMModel()->setAllValues(0.3f);\n  model->getInitModel()->setAllValues(0.01f);\n\n  Eigen::MatrixXf buffer = model->getEmptyBufferMatrix();\n  buffer.setOnes();\n  buffer = buffer * 0.01f;\n\n  // Basic initial state and no movement should stay still\n  state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 0, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), 0.0, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.0, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 4.1500520706176758 * dt, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 4.1500520706176758, tol);\n  EXPECT_NEAR(output(23), 0.0, tol);\n\n  // Apply full throttle from zero state\n  state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), 1.6, tol);\n  EXPECT_NEAR(next_state(0), 0.16, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.0, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 5.2751355171203613 * dt, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 5.2751355171203613, tol);\n  EXPECT_NEAR(output(23), 1.6, tol);\n\n  // Apply throttle to a state with positive velocity\n  state << 1, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0;\n  model->initializeLSTM(buffer);\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), 5.5, tol);\n  EXPECT_NEAR(next_state(0), 1.55, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  
EXPECT_NEAR(next_state(2), 0.1, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 7.1901092529296875 * dt, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 7.1901092529296875, tol);\n  EXPECT_NEAR(output(23), 5.5, tol);\n\n  // Apply full throttle and half left turn to origin state\n  state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0.5;\n  model->initializeLSTM(buffer);\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), 1.6, tol);\n  EXPECT_NEAR(next_state(0), 0.16, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.0, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 6.1967658996582031 * dt, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 6.1967658996582031, tol);\n  EXPECT_NEAR(output(23), 1.6, tol);\n\n  // Apply full throttle and half left turn to a moving state oriented 30 degrees to the left\n  float yaw = M_PI / 6;\n  state << 1.0, yaw, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0.5;\n  model->initializeLSTM(buffer);\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), 5.5, tol);\n  EXPECT_NEAR(next_state(0), 1.55, tol);\n  EXPECT_NEAR(next_state(1), yaw, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), 9.0641689300537109 * dt, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 9.0641689300537109, tol);\n  EXPECT_NEAR(output(23), 5.5, tol);\n\n  // Apply full throttle and half left turn to a moving state oriented 30 degrees to the left which is already 
turning\n  float steer_angle = M_PI / 8;\n  state << 1.0, yaw, 0, 0, steer_angle, -0.0, 0.0, 0, 0;\n  control << 1, 0.5;\n  model->initializeLSTM(buffer);\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), 5.5, tol);\n  EXPECT_NEAR(next_state(0), 1.55, tol);\n  EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + 9.3808889389038086 * dt, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 9.3808889389038086, tol);\n  EXPECT_NEAR(output(23), 5.5, tol);\n\n  // Apply full brake and half left turn to a moving state oriented 30 degrees to the left which is already turning\n  state << 1.0, yaw, 0, 0, steer_angle, 1.0, -0.0, 0.0, 0, 0;\n  control << -1, 0.5;\n  model->initializeLSTM(buffer);\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -5.5, tol);\n  EXPECT_NEAR(next_state(0), 1 - 5.5 * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + 9.3808889389038086 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 9.3808889389038086, tol);\n  EXPECT_NEAR(output(23), -5.5, tol);\n\n  /**\n   * Apply full brake and half left turn to a moving state oriented 30 degrees to the left which is already turning\n   * and on a downward facing hill\n   */\n  float pitch = 20 * M_PI / 180;\n  state << 1.0, yaw, 0, 0, steer_angle, 1, -0.0, pitch, 0, 0;\n  control << -1, 0.5;\n  model->initializeLSTM(buffer);\n  dynamics.step(state, next_state, 
state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), 1 + (-5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + 9.3808889389038086 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 9.3808889389038086, tol);\n  EXPECT_NEAR(output(23), (-5.5 + 9.81 * sinf(pitch)), tol);\n\n  /**\n   * Apply full brake and half left turn to a backwards moving state oriented 30 degrees to the left which is already\n   * turning and on a downward facing hill\n   */\n  state << -1.0, yaw, 0, 0, steer_angle, 1, -0.0, pitch, 0, 0;\n  control << -1, 0.5;\n  model->initializeLSTM(buffer);\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + 0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), -1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + 3.5283551216125488 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 3.5283551216125488, tol);\n  EXPECT_NEAR(output(23), (5.5 + 9.81 * sinf(pitch)), tol);\n\n  /**\n   * Apply full brake and half right turn to a backwards moving state oriented 30 degrees to the left which is already\n   * turning and on a downward facing hill\n   */\n  state << -1.0, yaw, 0, 0, steer_angle, 1, -0.0, pitch, 0, 0;\n  control << -1, -0.5;\n  model->initializeLSTM(buffer);\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  
EXPECT_NEAR(next_state(1), yaw + 0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), -1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + -0.32771033048629761 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), -0.32771033048629761, tol);\n  EXPECT_NEAR(output(23), (5.5 + 9.81 * sinf(pitch)), tol);\n\n  /**\n   * Apply full brake and half right turn to a backwards moving state with a huge steering angle to test max steer\n   * angle and steering rate. We are also on a downward facing hill and are already oriented 30 degrees to the left\n   */\n  steer_angle *= 100;\n  state << -1.0, yaw, 0, 0, steer_angle, 1, -0.0, pitch, 0, 0;\n  control << -1, -0.5;\n  model->initializeLSTM(buffer);\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + tan(steer_angle / -9.1) * dt * -2, tol);\n  EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), -1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), params.max_steer_angle, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 15.97845268249511, tol);\n  EXPECT_NEAR(output(23), (5.5 + 9.81 * sinf(pitch)), tol);\n}\n\nTEST_F(RacerDubinsElevationLSTMSteeringTest, TestStepGPUvsCPUNoNetwork)\n{\n  const int num_rollouts = 1024;\n  const float dt = 0.1f;\n  CudaCheckError();\n  using DYN = RacerDubinsElevationLSTMSteering;\n  RacerDubinsElevationLSTMSteering dynamics =\n      RacerDubinsElevationLSTMSteering(3, 20, init_output_layers, 4, 4, output_layers, 11);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  
helper->setExtent(0, extent);\n\n  std::vector<float> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = i * 1.0f;\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, make_float3(1, 2, 3));\n\n  helper->updateTexture(0, data_vec);\n  helper->updateResolution(0, 10);\n  helper->enableTexture(0);\n  helper->copyToDevice(true);\n\n  CudaCheckError();\n  dynamics.GPUSetup();\n  CudaCheckError();\n\n  EXPECT_NE(dynamics.getHelper()->getLSTMDevicePtr(), nullptr);\n  EXPECT_NE(dynamics.network_d_, nullptr);\n  EXPECT_EQ(dynamics.network_d_, dynamics.getHelper()->getLSTMDevicePtr());\n\n  Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::CONTROL_DIM, num_rollouts> control_trajectory;\n  control_trajectory = Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::CONTROL_DIM, num_rollouts>::Random();\n  Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::STATE_DIM, num_rollouts> state_trajectory;\n  state_trajectory = Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::STATE_DIM, num_rollouts>::Random();\n\n  std::vector<std::array<float, RacerDubinsElevationLSTMSteering::STATE_DIM>> s(num_rollouts);\n  std::vector<std::array<float, RacerDubinsElevationLSTMSteering::STATE_DIM>> s_next(num_rollouts);\n  std::vector<std::array<float, RacerDubinsElevationLSTMSteering::STATE_DIM>> s_der(num_rollouts);\n  // steering, throttle\n  std::vector<std::array<float, RacerDubinsElevationLSTMSteering::CONTROL_DIM>> u(num_rollouts);\n\n  RacerDubinsElevationLSTMSteering::state_array state;\n  RacerDubinsElevationLSTMSteering::state_array next_state_cpu;\n  RacerDubinsElevationLSTMSteering::control_array control;\n  RacerDubinsElevationLSTMSteering::output_array output;\n  RacerDubinsElevationLSTMSteering::state_array state_der_cpu = 
RacerDubinsElevationLSTMSteering::state_array::Zero();\n\n  // Run dynamics on dynamicsU\n  // Run dynamics on GPU\n  for (int y_dim = 1; y_dim <= 16; y_dim++)\n  {\n    DYN::buffer_trajectory buffer;\n    buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n    buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n    buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n    buffer[\"STEER_CMD\"] = Eigen::VectorXf::Random(51);\n\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < s[0].size(); dim++)\n      {\n        s[state_index][dim] = state_trajectory.col(state_index)(dim);\n      }\n      for (int dim = 0; dim < u[0].size(); dim++)\n      {\n        u[state_index][dim] = control_trajectory.col(state_index)(dim);\n      }\n    }\n    dynamics.updateFromBuffer(buffer);\n    launchStepTestKernel<RacerDubinsElevationLSTMSteering, 16>(dynamics, s, u, s_der, s_next, 0, dt, y_dim);\n    for (int point = 0; point < num_rollouts; point++)\n    {\n      dynamics.initializeDynamics(state, control, output, 0, 0);\n      state = state_trajectory.col(point);\n      control = control_trajectory.col(point);\n      state_der_cpu = RacerDubinsElevationLSTMSteering::state_array::Zero();\n\n      dynamics.step(state, next_state_cpu, state_der_cpu, control, output, 0, dt);\n      // for (int dim = 0; dim < RacerDubinsElevationLSTMSteering::STATE_DIM; dim++)\n      for (int dim = 0; dim < RacerDubinsElevationLSTMSteering::STATE_DIM; dim++)\n      {\n        EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-4)\n            << \"at index \" << point << \" with y_dim \" << y_dim << \" dim \" << dim;\n        // EXPECT_NEAR(state(dim), s[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        EXPECT_NEAR(next_state_cpu(dim), s_next[point][dim], 1e-4)\n            << \"at index \" << point << \" with y_dim \" << y_dim << \" dim \" << dim;\n        
EXPECT_TRUE(isfinite(s_next[point][dim]));\n      }\n    }\n  }\n  dynamics.freeCudaMem();\n}\n\nTEST_F(RacerDubinsElevationLSTMSteeringTest, TestStepGPUvsCPU)\n{\n  const int num_rollouts = 64;\n  const float dt = 0.1f;\n  CudaCheckError();\n  using DYN = RacerDubinsElevationLSTMSteering;\n  RacerDubinsElevationLSTMSteering dynamics = RacerDubinsElevationLSTMSteering(mppi::tests::steering_lstm);\n  EXPECT_FLOAT_EQ(dynamics.getParams().max_steer_rate, 17.590296);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steering_constant, 3.286375);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_constant, 9.301527);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_drag_constant, -0.60327667);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  helper->setExtent(0, extent);\n\n  std::vector<float> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = i * 1.0f;\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, make_float3(1, 2, 3));\n\n  helper->updateTexture(0, data_vec);\n  helper->updateResolution(0, 10);\n  helper->enableTexture(0);\n  helper->copyToDevice(true);\n\n  CudaCheckError();\n  dynamics.GPUSetup();\n  CudaCheckError();\n\n  EXPECT_NE(dynamics.getHelper()->getLSTMDevicePtr(), nullptr);\n  EXPECT_NE(dynamics.network_d_, nullptr);\n  EXPECT_EQ(dynamics.network_d_, dynamics.getHelper()->getLSTMDevicePtr());\n\n  DYN::buffer_trajectory buffer;\n  buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_CMD\"] = Eigen::VectorXf::Random(51);\n\n  checkGPUComputationStep<RacerDubinsElevationLSTMSteering>(dynamics, dt, 16, 32, 
buffer, 1.0e-4);\n}\n\n// TODO assert they are different when a network is loaded, check params are the same\n\nTEST_F(RacerDubinsElevationLSTMSteeringTest, TestStepGPUvsCPUReverse)\n{\n  using DYN = RacerDubinsElevationLSTMSteering;\n\n  const int num_rollouts = 1024;\n  const float dt = 0.1f;\n  CudaCheckError();\n  RacerDubinsElevationLSTMSteering dynamics = RacerDubinsElevationLSTMSteering(mppi::tests::steering_lstm);\n  auto params = dynamics.getParams();\n  params.gear_sign = -1;\n  dynamics.setParams(params);\n  EXPECT_FLOAT_EQ(dynamics.getParams().max_steer_rate, 17.590296);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steering_constant, 3.286375);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_constant, 9.301527);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_drag_constant, -0.60327667);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  helper->setExtent(0, extent);\n\n  std::vector<float> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = i * 1.0f;\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, make_float3(1, 2, 3));\n\n  helper->updateTexture(0, data_vec);\n  helper->updateResolution(0, 10);\n  helper->enableTexture(0);\n  helper->copyToDevice(true);\n\n  CudaCheckError();\n  dynamics.GPUSetup();\n  CudaCheckError();\n\n  EXPECT_NE(dynamics.getHelper()->getLSTMDevicePtr(), nullptr);\n  EXPECT_NE(dynamics.network_d_, nullptr);\n  EXPECT_EQ(dynamics.network_d_, dynamics.getHelper()->getLSTMDevicePtr());\n\n  DYN::buffer_trajectory buffer;\n  buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n  
buffer[\"STEER_CMD\"] = Eigen::VectorXf::Random(51);\n\n  checkGPUComputationStep<RacerDubinsElevationLSTMSteering>(dynamics, dt, 16, 32, buffer, 1.0e-4);\n}\n\nTEST_F(RacerDubinsElevationLSTMSteeringTest, compareToElevationWithoutSteering)\n{\n  // by default the network will output zeros and not effect any states\n  using DYN = RacerDubinsElevationLSTMSteering;\n  using DYN_PARAMS = DYN::DYN_PARAMS_T;\n\n  const int num_rollouts = 1024;\n  const float dt = 0.1f;\n  CudaCheckError();\n  auto dynamics = RacerDubinsElevationLSTMSteering(3, 20, init_output_layers, 4, 4, output_layers, 11);\n  RacerDubinsElevation dynamics2 = RacerDubinsElevation();\n  auto params = dynamics.getParams();\n  dynamics.setParams(params);\n  dynamics2.setParams(params);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  helper->setExtent(0, extent);\n\n  std::vector<float> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = i * 1.0f;\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, make_float3(1, 2, 3));\n\n  helper->updateTexture(0, data_vec);\n  helper->updateResolution(0, 10);\n  helper->enableTexture(0);\n  helper->copyToDevice(true);\n\n  TwoDTextureHelper<float>* helper2 = dynamics2.getTextureHelper();\n  helper2->setExtent(0, extent);\n\n  helper2->updateRotation(0, new_rot_mat);\n  helper2->updateOrigin(0, make_float3(1, 2, 3));\n\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = i * 1.0f;\n  }\n  helper2->updateTexture(0, data_vec);\n  helper2->updateResolution(0, 10);\n  helper2->enableTexture(0);\n  helper2->copyToDevice(true);\n\n  CudaCheckError();\n  dynamics.GPUSetup();\n  dynamics2.GPUSetup();\n  
CudaCheckError();\n\n  Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::CONTROL_DIM, num_rollouts> control_trajectory;\n  control_trajectory = Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::CONTROL_DIM, num_rollouts>::Random();\n  Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::STATE_DIM, num_rollouts> state_trajectory;\n  state_trajectory = Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::STATE_DIM, num_rollouts>::Random();\n\n  RacerDubinsElevationLSTMSteering::state_array state;\n  RacerDubinsElevationLSTMSteering::state_array next_state_cpu;\n  RacerDubinsElevationLSTMSteering::control_array control;\n  RacerDubinsElevationLSTMSteering::output_array output;\n  RacerDubinsElevationLSTMSteering::state_array state_der_cpu = RacerDubinsElevationLSTMSteering::state_array::Zero();\n\n  RacerDubinsElevationLSTMSteering::state_array state2;\n  RacerDubinsElevationLSTMSteering::state_array next_state_cpu2;\n  RacerDubinsElevationLSTMSteering::control_array control2;\n  RacerDubinsElevationLSTMSteering::output_array output2;\n  RacerDubinsElevationLSTMSteering::state_array state_der_cpu2 = RacerDubinsElevationLSTMSteering::state_array::Zero();\n\n  DYN::buffer_trajectory buffer;\n  buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_CMD\"] = Eigen::VectorXf::Random(51);\n\n  dynamics.updateFromBuffer(buffer);\n  for (int point = 0; point < num_rollouts; point++)\n  {\n    dynamics.initializeDynamics(state, control, output, 0, 0);\n    state = state_trajectory.col(point);\n    control = control_trajectory.col(point);\n    state_der_cpu = RacerDubinsElevationLSTMSteering::state_array::Zero();\n\n    dynamics2.initializeDynamics(state2, control2, output2, 0, 0);\n    state2 = state_trajectory.col(point);\n    control2 = control_trajectory.col(point);\n    state_der_cpu2 = 
RacerDubinsElevationLSTMSteering::state_array::Zero();\n\n    dynamics.step(state, next_state_cpu, state_der_cpu, control, output, 0, dt);\n    dynamics2.step(state2, next_state_cpu2, state_der_cpu2, control2, output2, 0, dt);\n\n    for (int dim = 0; dim < RacerDubinsElevationLSTMSteering::STATE_DIM; dim++)\n    {\n      if (dim == S_IND_CLASS(DYN_PARAMS, STEER_ANGLE) or dim == S_IND_CLASS(DYN_PARAMS, STEER_ANGLE_RATE))\n      {  // this is done since the steering wheel setup is different, accel version\n        continue;\n      }\n      EXPECT_NEAR(state_der_cpu(dim), state_der_cpu2(dim), 1e-4) << \"state der at index \" << point << \" dim \" << dim;\n      EXPECT_NEAR(next_state_cpu(dim), next_state_cpu2(dim), 1e-4) << \"next state at index \" << point << \" dim \" << dim;\n    }\n    for (int dim = 0; dim < RacerDubinsElevationLSTMSteering::OUTPUT_DIM; dim++)\n    {\n      if (dim == O_IND_CLASS(DYN_PARAMS, STEER_ANGLE) or dim == O_IND_CLASS(DYN_PARAMS, STEER_ANGLE_RATE) or\n          dim == O_IND_CLASS(DYN_PARAMS, WHEEL_FORCE_UP_MAX) or dim == O_IND_CLASS(DYN_PARAMS, WHEEL_FORCE_FWD_MAX) or\n          dim == O_IND_CLASS(DYN_PARAMS, WHEEL_FORCE_SIDE_MAX) or dim == O_IND_CLASS(DYN_PARAMS, FILLER_1))\n      {  // this is done since the steering wheel setup is different, accel version\n        continue;\n      }\n      EXPECT_NEAR(output(dim), output2(dim), 1e-4) << \"output at index \" << point << \" dim \" << dim;\n    }\n  }\n\n  params.gear_sign = -1;\n  dynamics.setParams(params);\n  dynamics2.setParams(params);\n\n  // check in reverse as well\n  for (int point = 0; point < num_rollouts; point++)\n  {\n    dynamics.initializeDynamics(state, control, output, 0, 0);\n    state = state_trajectory.col(point);\n    control = control_trajectory.col(point);\n    state_der_cpu = RacerDubinsElevationLSTMSteering::state_array::Zero();\n\n    dynamics2.initializeDynamics(state2, control2, output2, 0, 0);\n    state2 = state_trajectory.col(point);\n    control2 = 
control_trajectory.col(point);\n    state_der_cpu2 = RacerDubinsElevationLSTMSteering::state_array::Zero();\n\n    dynamics.step(state, next_state_cpu, state_der_cpu, control, output, 0, dt);\n    dynamics2.step(state2, next_state_cpu2, state_der_cpu2, control2, output2, 0, dt);\n\n    for (int dim = 0; dim < RacerDubinsElevationLSTMSteering::STATE_DIM; dim++)\n    {\n      if (dim == S_IND_CLASS(DYN_PARAMS, STEER_ANGLE) or dim == S_IND_CLASS(DYN_PARAMS, STEER_ANGLE_RATE))\n      {  // this is done since the steering wheel setup is different, accel version\n        continue;\n      }\n      EXPECT_NEAR(state_der_cpu(dim), state_der_cpu2(dim), 1e-4) << \"at index \" << point << \" dim \" << dim;\n      EXPECT_NEAR(next_state_cpu(dim), next_state_cpu2(dim), 1e-4) << \"at index \" << point << \" dim \" << dim;\n    }\n    for (int dim = 0; dim < RacerDubinsElevationLSTMSteering::OUTPUT_DIM; dim++)\n    {\n      if (dim == O_IND_CLASS(DYN_PARAMS, STEER_ANGLE) or dim == O_IND_CLASS(DYN_PARAMS, STEER_ANGLE_RATE) or\n          dim == O_IND_CLASS(DYN_PARAMS, WHEEL_FORCE_UP_MAX) or dim == O_IND_CLASS(DYN_PARAMS, WHEEL_FORCE_FWD_MAX) or\n          dim == O_IND_CLASS(DYN_PARAMS, WHEEL_FORCE_SIDE_MAX) or dim == O_IND_CLASS(DYN_PARAMS, FILLER_1))\n      {  // this is done since the steering wheel setup is different, accel version\n        continue;\n      }\n      EXPECT_NEAR(output(dim), output2(dim), 1e-4) << \"at index \" << point << \" dim \" << dim;\n    }\n  }\n  dynamics.freeCudaMem();\n}\n\n/*\nclass LinearDummy : public RacerDubinsElevationLSTMSteering {\npublic:\n  bool computeGrad(const Eigen::Ref<const state_array> & state,\n                   const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A,\n                   Eigen::Ref<dfdu> B) {\n    return false;\n  };\n};\n\nTEST_F(RacerDubinsElevationLSTMSteeringTest, TestComputeGradComputation) {\n  Eigen::Matrix<float, RacerDubinsElevationLSTMSteering::STATE_DIM, 
RacerDubinsElevationLSTMSteering::STATE_DIM +\nRacerDubinsElevationLSTMSteering::CONTROL_DIM> numeric_jac; Eigen::Matrix<float,\nRacerDubinsElevationLSTMSteering::STATE_DIM, RacerDubinsElevationLSTMSteering::STATE_DIM +\nRacerDubinsElevationLSTMSteering::CONTROL_DIM> analytic_jac; RacerDubinsElevationLSTMSteering::state_array state; state\n<< 1, 2, 3, 4; RacerDubinsElevationLSTMSteering::control_array control; control << 5;\n\n  auto analytic_grad_model = RacerDubinsElevationLSTMSteering();\n\n  RacerDubinsElevationLSTMSteering::dfdx A_analytic = RacerDubinsElevationLSTMSteering::dfdx::Zero();\n  RacerDubinsElevationLSTMSteering::dfdu B_analytic = RacerDubinsElevationLSTMSteering::dfdu::Zero();\n\n  analytic_grad_model.computeGrad(state, control, A_analytic, B_analytic);\n\n  auto numerical_grad_model = LinearDummy();\n\n  std::shared_ptr<ModelWrapperDDP<LinearDummy>> ddp_model =\nstd::make_shared<ModelWrapperDDP<LinearDummy>>(&numerical_grad_model);\n\n  analytic_jac.leftCols<RacerDubinsElevationLSTMSteering::STATE_DIM>() = A_analytic;\n  analytic_jac.rightCols<RacerDubinsElevationLSTMSteering::CONTROL_DIM>() = B_analytic;\n  numeric_jac = ddp_model->df(state, control);\n\n  ASSERT_LT((numeric_jac - analytic_jac).norm(), 1e-3) << \"Numeric Jacobian\\n\" << numeric_jac << \"\\nAnalytic Jacobian\\n\"\n<< analytic_jac;\n}\n\n*/\n"
  },
  {
    "path": "tests/dynamics/racer_dubins_elevation_lstm_uncertainty_model_test.cu",
    "content": "#include <Eigen/Dense>\n#include <gtest/gtest.h>\n#include <mppi/dynamics/racer_dubins/racer_dubins_elevation_lstm_unc.cuh>\n#include <kernel_tests/dynamics/dynamics_generic_kernel_tests.cuh>\n#include <mppi/ddp/ddp_model_wrapper.h>\n#include <racer_test_networks.h>\n#include <cuda_runtime.h>\n#include <mppi/core/mppi_common.cuh>\n\nclass RacerDubinsElevationLSTMUncertaintyTest : public ::testing::Test\n{\npublic:\n  cudaStream_t stream;\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  float map_resolution = 10.0f;          // [m / px]\n  float3 origin = make_float3(1, 2, 3);  // [m, m, m]\n\n  std::vector<float> data_vec;\n  std::vector<float4> normal_vec;\n\n  void SetUp() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n\n    steer_config.init_config.input_dim = 3;\n    steer_config.init_config.hidden_dim = 20;\n    steer_config.init_config.output_layers = { 23, 100, 8 };\n    steer_config.pred_config.input_dim = 4;\n    steer_config.pred_config.hidden_dim = 4;\n    steer_config.pred_config.output_layers = { 8, 20, 1 };\n    steer_config.init_len = 11;\n\n    unc_config.init_config.input_dim = 10;\n    unc_config.init_config.hidden_dim = 20;\n    unc_config.init_config.output_layers = { 30, 100, 8 };\n    unc_config.pred_config.input_dim = 13;\n    unc_config.pred_config.hidden_dim = 4;\n    unc_config.pred_config.output_layers = { 17, 20, 5 };\n    unc_config.init_len = 11;\n\n    mean_config.init_config.input_dim = 10;\n    mean_config.init_config.hidden_dim = 20;\n    mean_config.init_config.output_layers = { 30, 100, 8 };\n    mean_config.pred_config.input_dim = 12;\n    mean_config.pred_config.hidden_dim = 4;\n    mean_config.pred_config.output_layers = { 16, 20, 2 };\n    mean_config.init_len = 11;\n\n    data_vec.resize(extent.width * extent.height);\n    normal_vec.resize(extent.width * extent.height);\n    for (int i = 0; i < data_vec.size(); i++)\n    {\n      data_vec[i] = i * 1.0f;\n      
Eigen::Vector4f random_normal = Eigen::Vector4f::Random();\n      normal_vec[i] = make_float4(random_normal.x(), random_normal.y(), random_normal.z(), random_normal.w());\n    }\n  }\n\n  void TearDown() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamDestroy(stream));\n  }\n\n  LSTMLSTMConfig steer_config;\n  LSTMLSTMConfig mean_config;\n  LSTMLSTMConfig unc_config;\n};\n\nTEST_F(RacerDubinsElevationLSTMUncertaintyTest, Template)\n{\n  auto dynamics = RacerDubinsElevationLSTMUncertainty(steer_config, mean_config, unc_config);\n  EXPECT_EQ(26, RacerDubinsElevationLSTMUncertainty::STATE_DIM);\n  EXPECT_EQ(2, RacerDubinsElevationLSTMUncertainty::CONTROL_DIM);\n  EXPECT_TRUE(dynamics.checkRequiresBuffer());\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n  EXPECT_NE(dynamics.getHelper(), nullptr);\n  EXPECT_NE(dynamics.getUncertaintyHelper(), nullptr);\n  EXPECT_NE(dynamics.getMeanHelper(), nullptr);\n}\n\nTEST_F(RacerDubinsElevationLSTMUncertaintyTest, BindStreamConstructor)\n{\n  auto dynamics = RacerDubinsElevationLSTMUncertainty(steer_config, mean_config, unc_config, stream);\n\n  EXPECT_EQ(dynamics.stream_, stream) << \"Stream binding failure.\";\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n  EXPECT_EQ(dynamics.getTextureHelper()->stream_, stream);\n  EXPECT_EQ(dynamics.getHelper()->getLSTMModel()->stream_, stream);\n  EXPECT_EQ(dynamics.getMeanHelper()->getLSTMModel()->stream_, stream);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getLSTMModel()->stream_, stream);\n}\n\n// TEST_F(RacerDubinsElevationLSTMUncertaintyTest, Deconstructor)\n// {\n//   {\n//     auto dynamics = RacerDubinsElevationLSTMUncertainty(steer_config, mean_config, unc_config);\n//     dynamics = RacerDubinsElevationLSTMUncertainty(steer_config, mean_config, unc_config);\n//   }\n// }\n\nTEST_F(RacerDubinsElevationLSTMUncertaintyTest, BindStream)\n{\n  auto dynamics = RacerDubinsElevationLSTMUncertainty(steer_config, mean_config, unc_config);\n  
dynamics.bindToStream(stream);\n\n  EXPECT_EQ(dynamics.stream_, stream) << \"Stream binding failure.\";\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n  EXPECT_EQ(dynamics.getTextureHelper()->stream_, stream);\n  EXPECT_EQ(dynamics.getHelper()->getLSTMModel()->stream_, stream);\n  EXPECT_EQ(dynamics.getMeanHelper()->getLSTMModel()->stream_, stream);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getLSTMModel()->stream_, stream);\n}\n\nTEST_F(RacerDubinsElevationLSTMUncertaintyTest, GPUSetup)\n{\n  auto dynamics = RacerDubinsElevationLSTMUncertainty(steer_config, mean_config, unc_config, stream);\n\n  dynamics.GPUSetup();\n\n  EXPECT_EQ(dynamics.stream_, stream) << \"Stream binding failure.\";\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n  EXPECT_EQ(dynamics.getTextureHelper()->stream_, stream);\n  EXPECT_EQ(dynamics.getHelper()->getLSTMModel()->stream_, stream);\n  EXPECT_EQ(dynamics.getMeanHelper()->getLSTMModel()->stream_, stream);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getLSTMModel()->stream_, stream);\n\n  EXPECT_EQ(dynamics.GPUMemStatus_, true);\n  EXPECT_NE(dynamics.model_d_, nullptr);\n\n  // steering model\n  EXPECT_NE(dynamics.network_d_, nullptr);\n  EXPECT_EQ(dynamics.getHelper()->getLSTMModel()->GPUMemStatus_, true);\n  EXPECT_NE(dynamics.getHelper()->getLSTMDevicePtr(), nullptr);\n  EXPECT_NE(dynamics.getHelper()->getLSTMModel()->network_d_, nullptr);\n\n  // mean model\n  EXPECT_NE(dynamics.mean_d_, nullptr);\n  EXPECT_EQ(dynamics.getMeanHelper()->getLSTMModel()->GPUMemStatus_, true);\n  EXPECT_NE(dynamics.getMeanHelper()->getLSTMDevicePtr(), nullptr);\n  EXPECT_NE(dynamics.getMeanHelper()->getLSTMModel()->network_d_, nullptr);\n\n  // mean model\n  EXPECT_NE(dynamics.uncertainty_d_, nullptr);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getLSTMModel()->GPUMemStatus_, true);\n  EXPECT_NE(dynamics.getUncertaintyHelper()->getLSTMDevicePtr(), nullptr);\n  EXPECT_NE(dynamics.getUncertaintyHelper()->getLSTMModel()->network_d_, 
nullptr);\n}\n\n// TODO test case that checks that I get the same as base class here\n\nTEST_F(RacerDubinsElevationLSTMUncertaintyTest, TestLoadParams)\n{\n  CudaCheckError();\n  using DYN = RacerDubinsElevationLSTMUncertainty;\n  RacerDubinsElevationLSTMUncertainty dynamics =\n      RacerDubinsElevationLSTMUncertainty(mppi::tests::racer_dubins_elevation_uncertainty_test);\n  EXPECT_FLOAT_EQ(dynamics.getParams().max_steer_angle, 5.0);\n  EXPECT_FLOAT_EQ(dynamics.getParams().max_steer_rate, 17.590296);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steering_constant, 3.286375);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_constant, 9.30152702);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_drag_constant, -0.60327667);\n\n  EXPECT_FLOAT_EQ(dynamics.getParams().max_brake_rate_pos, 10);\n  EXPECT_FLOAT_EQ(dynamics.getParams().max_brake_rate_neg, 10);\n  EXPECT_FLOAT_EQ(dynamics.getParams().pos_quad_brake_c[0], 3);\n  EXPECT_FLOAT_EQ(dynamics.getParams().pos_quad_brake_c[1], 0.1);\n  EXPECT_FLOAT_EQ(dynamics.getParams().pos_quad_brake_c[2], 0.48);\n  EXPECT_FLOAT_EQ(dynamics.getParams().neg_quad_brake_c[0], 5.8);\n  EXPECT_FLOAT_EQ(dynamics.getParams().neg_quad_brake_c[1], 0.1);\n  EXPECT_FLOAT_EQ(dynamics.getParams().neg_quad_brake_c[2], 1.4);\n\n  EXPECT_FLOAT_EQ(dynamics.getParams().c_t[0], 3.0364573);\n  EXPECT_FLOAT_EQ(dynamics.getParams().c_t[1], 4.59772491);\n  EXPECT_FLOAT_EQ(dynamics.getParams().c_t[2], 4.06954288);\n  EXPECT_FLOAT_EQ(dynamics.getParams().c_b[0], 2.56373692);\n  EXPECT_FLOAT_EQ(dynamics.getParams().c_b[1], 6.18813848);\n  EXPECT_FLOAT_EQ(dynamics.getParams().c_b[2], 25.52443695);\n  EXPECT_FLOAT_EQ(dynamics.getParams().c_v[0], 4.39438224e-01);\n  EXPECT_FLOAT_EQ(dynamics.getParams().c_v[1], 2.37689335e-02);\n  EXPECT_FLOAT_EQ(dynamics.getParams().c_v[2], 2.12573977e-05);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_angle_scale, -13.25719738);\n  EXPECT_FLOAT_EQ(dynamics.getParams().gravity, -3.1667469);\n  
EXPECT_FLOAT_EQ(dynamics.getParams().unc_scale[0], 0.5);\n  EXPECT_FLOAT_EQ(dynamics.getParams().unc_scale[1], 0.2);\n  EXPECT_FLOAT_EQ(dynamics.getParams().unc_scale[2], 0.5);\n  EXPECT_FLOAT_EQ(dynamics.getParams().unc_scale[3], 0.02);\n  EXPECT_FLOAT_EQ(dynamics.getParams().unc_scale[4], 0.02);\n  EXPECT_FLOAT_EQ(dynamics.getParams().unc_scale[5], 0.1);\n  EXPECT_FLOAT_EQ(dynamics.getParams().unc_scale[6], 0.1);\n\n  EXPECT_FLOAT_EQ(dynamics.getParams().low_min_throttle, 0.0);\n  EXPECT_FLOAT_EQ(dynamics.getParams().c_0, 0.0);\n  EXPECT_FLOAT_EQ(dynamics.getParams().wheel_base, 3.0);\n  EXPECT_GT(dynamics.getParams().clamp_ax, 10.0f);\n\n  EXPECT_EQ(dynamics.getHelper()->getInitModel()->getInputDim(), 3);\n  EXPECT_EQ(dynamics.getHelper()->getInitModel()->getOutputDim(), 8);\n  EXPECT_EQ(dynamics.getHelper()->getInitModel()->getHiddenDim(), 20);\n  EXPECT_EQ(dynamics.getHelper()->getInitModel()->getOutputModel()->getInputDim(), 23);\n  EXPECT_EQ(dynamics.getHelper()->getInitModel()->getOutputModel()->getOutputDim(), 8);\n  EXPECT_EQ(dynamics.getHelper()->getLSTMModel()->getInputDim(), 4);\n  EXPECT_EQ(dynamics.getHelper()->getLSTMModel()->getOutputDim(), 1);\n  EXPECT_EQ(dynamics.getHelper()->getLSTMModel()->getHiddenDim(), 4);\n  EXPECT_EQ(dynamics.getHelper()->getLSTMModel()->getOutputModel()->getInputDim(), 8);\n  EXPECT_EQ(dynamics.getHelper()->getLSTMModel()->getOutputModel()->getOutputDim(), 1);\n  EXPECT_EQ(dynamics.getHelper()->getInitLen(), 11);\n\n  EXPECT_EQ(dynamics.getMeanHelper()->getInitModel()->getInputDim(), 9);\n  EXPECT_EQ(dynamics.getMeanHelper()->getInitModel()->getOutputDim(), 8);\n  EXPECT_EQ(dynamics.getMeanHelper()->getInitModel()->getHiddenDim(), 20);\n  EXPECT_EQ(dynamics.getMeanHelper()->getInitModel()->getOutputModel()->getInputDim(), 29);\n  EXPECT_EQ(dynamics.getMeanHelper()->getInitModel()->getOutputModel()->getOutputDim(), 8);\n  EXPECT_EQ(dynamics.getMeanHelper()->getLSTMModel()->getInputDim(), 11);\n  
EXPECT_EQ(dynamics.getMeanHelper()->getLSTMModel()->getOutputDim(), 2);\n  EXPECT_EQ(dynamics.getMeanHelper()->getLSTMModel()->getHiddenDim(), 4);\n  EXPECT_EQ(dynamics.getMeanHelper()->getLSTMModel()->getOutputModel()->getInputDim(), 15);\n  EXPECT_EQ(dynamics.getMeanHelper()->getLSTMModel()->getOutputModel()->getOutputDim(), 2);\n  EXPECT_EQ(dynamics.getMeanHelper()->getInitLen(), 11);\n\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getInitModel()->getInputDim(), 10);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getInitModel()->getOutputDim(), 8);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getInitModel()->getHiddenDim(), 20);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getInitModel()->getOutputModel()->getInputDim(), 30);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getInitModel()->getOutputModel()->getOutputDim(), 8);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getLSTMModel()->getInputDim(), 12);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getLSTMModel()->getOutputDim(), 7);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getLSTMModel()->getHiddenDim(), 4);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getLSTMModel()->getOutputModel()->getInputDim(), 16);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getLSTMModel()->getOutputModel()->getOutputDim(), 7);\n  EXPECT_EQ(dynamics.getUncertaintyHelper()->getInitLen(), 11);\n}\n\nTEST_F(RacerDubinsElevationLSTMUncertaintyTest, TestGPUvsCPU)\n{\n  CudaCheckError();\n  using DYN = RacerDubinsElevationLSTMUncertainty;\n  RacerDubinsElevationLSTMUncertainty dynamics =\n      RacerDubinsElevationLSTMUncertainty(mppi::tests::racer_dubins_elevation_uncertainty_test);\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  TwoDTextureHelper<float4>* normal_helper = dynamics.getTextureHelperNormals();\n  helper->updateRotation(0, 
new_rot_mat);\n  helper->updateOrigin(0, origin);\n  helper->updateTexture(0, data_vec, extent);\n  helper->updateResolution(0, map_resolution);\n  helper->enableTexture(0);\n  helper->copyToDevice(false);\n  normal_helper->updateRotation(0, new_rot_mat);\n  normal_helper->updateOrigin(0, origin);\n  normal_helper->updateTexture(0, normal_vec, extent);\n  normal_helper->updateResolution(0, map_resolution);\n  normal_helper->enableTexture(0);\n  normal_helper->copyToDevice(true);\n\n  DYN::buffer_trajectory buffer;\n  buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"CAN_STEER_CMD\"] = Eigen::VectorXf::Random(51);\n  buffer[\"ROLL\"] = Eigen::VectorXf::Random(51);\n  buffer[\"PITCH\"] = Eigen::VectorXf::Random(51);\n  buffer[\"CAN_THROTTLE_CMD\"] = Eigen::VectorXf::Random(51);\n  buffer[\"BRAKE_STATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"CAN_BRAKE_CMD\"] = Eigen::VectorXf::Random(51);\n  buffer[\"OMEGA_Z\"] = Eigen::VectorXf::Random(51);\n\n  checkGPUComputationStep<RacerDubinsElevationLSTMUncertainty>(dynamics, 0.02f, 16, 32, buffer, 1.0e-4);\n}\n\nTEST_F(RacerDubinsElevationLSTMUncertaintyTest, TestStepGPUvsCPUReverse)\n{\n  using DYN = RacerDubinsElevationLSTMUncertainty;\n\n  CudaCheckError();\n  using DYN = RacerDubinsElevationLSTMUncertainty;\n  RacerDubinsElevationLSTMUncertainty dynamics =\n      RacerDubinsElevationLSTMUncertainty(mppi::tests::racer_dubins_elevation_uncertainty_test);\n  auto params = dynamics.getParams();\n  params.gear_sign = -1;\n  dynamics.setParams(params);\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  TwoDTextureHelper<float4>* normal_helper = dynamics.getTextureHelperNormals();\n  helper->updateRotation(0, 
new_rot_mat);\n  helper->updateOrigin(0, origin);\n  helper->updateTexture(0, data_vec, extent);\n  helper->updateResolution(0, map_resolution);\n  helper->enableTexture(0);\n  helper->copyToDevice(false);\n  normal_helper->updateRotation(0, new_rot_mat);\n  normal_helper->updateOrigin(0, origin);\n  normal_helper->updateTexture(0, normal_vec, extent);\n  normal_helper->updateResolution(0, map_resolution);\n  normal_helper->enableTexture(0);\n  normal_helper->copyToDevice(true);\n\n  DYN::buffer_trajectory buffer;\n  buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"CAN_STEER_CMD\"] = Eigen::VectorXf::Random(51);\n  buffer[\"ROLL\"] = Eigen::VectorXf::Random(51);\n  buffer[\"PITCH\"] = Eigen::VectorXf::Random(51);\n  buffer[\"CAN_THROTTLE_CMD\"] = Eigen::VectorXf::Random(51);\n  buffer[\"BRAKE_STATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"CAN_BRAKE_CMD\"] = Eigen::VectorXf::Random(51);\n  buffer[\"OMEGA_Z\"] = Eigen::VectorXf::Random(51);\n\n  checkGPUComputationStep<RacerDubinsElevationLSTMUncertainty>(dynamics, 0.02f, 16, 32, buffer, 1.0e-2);\n}\n\nTEST_F(RacerDubinsElevationLSTMUncertaintyTest, TestMatchesPython)\n{\n  using DYN = RacerDubinsElevationLSTMUncertainty;\n\n  CudaCheckError();\n  using DYN = RacerDubinsElevationLSTMUncertainty;\n  RacerDubinsElevationLSTMUncertainty dynamics =\n      RacerDubinsElevationLSTMUncertainty(mppi::tests::racer_dubins_elevation_uncertainty_test);\n  auto params = dynamics.getParams();\n  params.K_x = 0.0f;\n  params.K_y = 0.0f;\n  params.K_yaw = 0.0f;\n  params.K_vel_x = 0.0f;\n  params.wheel_base = 3.0f;\n  dynamics.setParams(params);\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  
TwoDTextureHelper<float4>* normal_helper = dynamics.getTextureHelperNormals();\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, origin);\n  helper->updateTexture(0, data_vec, extent);\n  helper->updateResolution(0, map_resolution);\n  helper->enableTexture(0);\n  helper->copyToDevice(false);\n  normal_helper->updateRotation(0, new_rot_mat);\n  normal_helper->updateOrigin(0, origin);\n  normal_helper->updateTexture(0, normal_vec, extent);\n  normal_helper->updateResolution(0, map_resolution);\n  normal_helper->enableTexture(0);\n  normal_helper->copyToDevice(true);\n\n  DYN::buffer_trajectory buffer;\n  buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"CAN_STEER_CMD\"] = Eigen::VectorXf::Random(51);\n  buffer[\"ROLL\"] = Eigen::VectorXf::Random(51);\n  buffer[\"PITCH\"] = Eigen::VectorXf::Random(51);\n  buffer[\"CAN_THROTTLE_CMD\"] = Eigen::VectorXf::Random(51);\n  buffer[\"BRAKE_STATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"CAN_BRAKE_CMD\"] = Eigen::VectorXf::Random(51);\n  buffer[\"OMEGA_Z\"] = Eigen::VectorXf::Random(51);\n\n  EXPECT_TRUE(fileExists(mppi::tests::racer_dubins_elevation_uncertainty_test));\n  if (!fileExists(mppi::tests::racer_dubins_elevation_uncertainty_test))\n  {\n    std::cerr << \"Could not load neural net model at path: \"\n              << mppi::tests::racer_dubins_elevation_uncertainty_test.c_str();\n    exit(-1);\n  }\n  cnpy::npz_t param_dict = cnpy::npz_load(mppi::tests::racer_dubins_elevation_uncertainty_test);\n\n  float T = param_dict.at(\"T\").data<double>()[0];\n  float dt = param_dict.at(\"dt\").data<double>()[0];\n  float init_length = param_dict.at(\"init_length\").data<double>()[0] + 1;\n  int traj_length = T / dt;\n  int num_points = static_cast<int>(param_dict.at(\"num_points\").data<double>()[0]);\n  int input_dim = 
static_cast<int>(param_dict.at(\"input_dim\").data<double>()[0]);\n  int state_dim = static_cast<int>(param_dict.at(\"state_dim\").data<double>()[0]);\n  int state_dim_raw = static_cast<int>(param_dict.at(\"state_dim_raw\").data<double>()[0]);\n  int output_dim = static_cast<int>(param_dict.at(\"output_dim\").data<double>()[0]);\n  int output_dim_raw = static_cast<int>(param_dict.at(\"output_dim_raw\").data<double>()[0]);\n\n  double* init_inputs = param_dict.at(\"init_input\").data<double>();\n  double* init_states = param_dict.at(\"init_state\").data<double>();\n  double* states = param_dict.at(\"state\").data<double>();\n  double* inputs = param_dict.at(\"input\").data<double>();\n  double* outputs = param_dict.at(\"output\").data<double>();\n  // double* inputs = param_dict.at(\"input\").data<double>();\n  // double* outputs = param_dict.at(\"output\").data<double>();\n  double* init_hidden = nullptr;  // = param_dict.at(\"init/hidden\").data<double>();\n  double* init_cell = nullptr;    // = param_dict.at(\"init/cell\").data<double>();\n  int hidden_dim = 0;\n  // double* hidden = param_dict.at(\"hidden\").data<double>();\n  // double* cell = param_dict.at(\"cell\").data<double>();\n\n  for (int point = 0; point < num_points; point++)\n  {\n    // std::cout << \"\\n\\n\\n\\n\\n\\n\\n================= on point \" << point << \"=============\" << std::endl;\n    // sets up the buffer correctly\n    for (int t = 0; t < init_length; t++)\n    {\n      int buffer_index = (51 - init_length) + t;\n      int init_input_shift = point * init_length * input_dim + t * input_dim;\n      buffer[\"CAN_THROTTLE_CMD\"](buffer_index) = init_inputs[init_input_shift + 0];\n      buffer[\"CAN_BRAKE_CMD\"](buffer_index) = init_inputs[init_input_shift + 1];\n      buffer[\"CAN_STEER_CMD\"](buffer_index) = init_inputs[init_input_shift + 2];\n      buffer[\"PITCH\"](buffer_index) = init_inputs[init_input_shift + 3];\n      buffer[\"ROLL\"](buffer_index) = init_inputs[init_input_shift 
+ 4];\n\n      int init_state_shift = point * init_length * state_dim_raw + t * state_dim_raw;\n      buffer[\"VEL_X\"](buffer_index) = init_states[init_state_shift + 3];\n      buffer[\"OMEGA_Z\"](buffer_index) = init_states[init_state_shift + 5];\n      buffer[\"BRAKE_STATE\"](buffer_index) = init_states[init_state_shift + 6];\n      buffer[\"STEER_ANGLE\"](buffer_index) = init_states[init_state_shift + 7];\n      buffer[\"STEER_ANGLE_RATE\"](buffer_index) = init_states[init_state_shift + 8];\n    }\n    typename DYN::state_array state = DYN::state_array::Zero();\n    typename DYN::state_array next_state = DYN::state_array::Zero();\n    typename DYN::control_array control = DYN::control_array::Zero();\n    typename DYN::state_array state_der = DYN::state_array::Zero();\n    typename DYN::output_array output_array = DYN::output_array::Zero();\n\n    int state_shift = point * traj_length * state_dim;\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, POS_X)) = states[state_shift + 0];\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, POS_Y)) = states[state_shift + 1];\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, YAW)) = states[state_shift + 2];\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, VEL_X)) = states[state_shift + 3];\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, OMEGA_Z)) = states[state_shift + 5];\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, BRAKE_STATE)) = states[state_shift + 6];\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, STEER_ANGLE)) = states[state_shift + 7];\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, STEER_ANGLE_RATE)) = states[state_shift + 8];\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_POS_X)) = 1.0e-5;\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_POS_Y)) = 1.0e-5;\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_YAW)) = 1.0e-5;\n    state(S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_VEL_X)) = 1.0e-5;\n\n    // std::cout << \"got initial state \" << state.transpose() << std::endl;\n\n    assert(state(S_IND_CLASS(DYN::DYN_PARAMS_T, BRAKE_STATE)) >= 0.0f &&\n   
        state(S_IND_CLASS(DYN::DYN_PARAMS_T, BRAKE_STATE)) <= 0.25f);\n    assert(state(S_IND_CLASS(DYN::DYN_PARAMS_T, STEER_ANGLE)) >= -5 &&\n           state(S_IND_CLASS(DYN::DYN_PARAMS_T, STEER_ANGLE)) <= 5);\n\n    if (dynamics.checkRequiresBuffer())\n    {\n      dynamics.updateFromBuffer(buffer);\n    }\n    dynamics.initializeDynamics(state, control, output_array, 0, dt);\n\n    // checks init values for hidden and cell states\n    hidden_dim = dynamics.getHelper()->getLSTMModel()->getHiddenDim();\n    init_hidden = param_dict.at(\"init/steering/hidden\").data<double>();\n    init_cell = param_dict.at(\"init/steering/cell\").data<double>();\n    for (int i = 0; i < hidden_dim; i++)\n    {\n      EXPECT_NEAR(dynamics.getHelper()->getLSTMModel()->getHiddenState()[i], init_hidden[point * hidden_dim + i],\n                  1.0e-5);\n      EXPECT_NEAR(dynamics.getHelper()->getLSTMModel()->getCellState()[i], init_cell[point * hidden_dim + i], 1.0e-5);\n    }\n\n    hidden_dim = dynamics.getMeanHelper()->getLSTMModel()->getHiddenDim();\n    init_hidden = param_dict.at(\"init/terra/mean_network/hidden\").data<double>();\n    init_cell = param_dict.at(\"init/terra/mean_network/cell\").data<double>();\n    for (int i = 0; i < hidden_dim; i++)\n    {\n      EXPECT_NEAR(dynamics.getMeanHelper()->getLSTMModel()->getHiddenState()[i], init_hidden[point * hidden_dim + i],\n                  1.0e-5);\n      EXPECT_NEAR(dynamics.getMeanHelper()->getLSTMModel()->getCellState()[i], init_cell[point * hidden_dim + i],\n                  1.0e-5);\n    }\n\n    hidden_dim = dynamics.getUncertaintyHelper()->getLSTMModel()->getHiddenDim();\n    init_hidden = param_dict.at(\"init/terra/uncertainty_network/hidden\").data<double>();\n    init_cell = param_dict.at(\"init/terra/uncertainty_network/cell\").data<double>();\n    for (int i = 0; i < hidden_dim; i++)\n    {\n      EXPECT_NEAR(dynamics.getUncertaintyHelper()->getLSTMModel()->getHiddenState()[i],\n                  
init_hidden[point * hidden_dim + i], 1.0e-5);\n      EXPECT_NEAR(dynamics.getUncertaintyHelper()->getLSTMModel()->getCellState()[i], init_cell[point * hidden_dim + i],\n                  1.0e-5);\n    }\n\n    // TODO find a way to scale the tol as a function of number of computations\n    // TODO scale tolerance by the number of timesteps\n\n    float tol = 1.0e-5;\n\n    // run init part of the networks and check hidden and cell states there\n    for (int t = 1; t < 50; t++)\n    {\n      int input_shift = point * traj_length * input_dim + (t - 1) * input_dim;\n      control(C_IND_CLASS(DYN::DYN_PARAMS_T, THROTTLE_BRAKE)) = inputs[input_shift + 0] - inputs[input_shift + 1];\n      control(C_IND_CLASS(DYN::DYN_PARAMS_T, STEER_CMD)) = inputs[input_shift + 2];\n      state(S_IND_CLASS(DYN::DYN_PARAMS_T, STATIC_PITCH)) = inputs[input_shift + 3];\n      state(S_IND_CLASS(DYN::DYN_PARAMS_T, STATIC_ROLL)) = inputs[input_shift + 4];\n      state(S_IND_CLASS(DYN::DYN_PARAMS_T, PITCH)) = inputs[input_shift + 3];\n      state(S_IND_CLASS(DYN::DYN_PARAMS_T, ROLL)) = inputs[input_shift + 4];\n\n      // std::cout << \"at time t \" << t - 1 << \" got control \" << control.transpose() << std::endl;\n      // std::cout << \"at time t \" << t - 1 << \" got state \" << state.transpose() << std::endl;\n      // std::cout << \"at time t \" << t - 1 << \" got vx \" << state(S_IND_CLASS(DYN::DYN_PARAMS_T, VEL_X)) << std::endl;\n\n      dynamics.step(state, next_state, state_der, control, output_array, 0.0f, dt);\n\n      int output_shift = point * traj_length * output_dim + (t - 1) * output_dim;\n      EXPECT_NEAR(state_der(S_IND_CLASS(DYN::DYN_PARAMS_T, VEL_X)), outputs[output_shift + 0], tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, VEL_X) << \" at time \" << t;\n      EXPECT_NEAR(state_der(S_IND_CLASS(DYN::DYN_PARAMS_T, YAW)), outputs[output_shift + 2], tol)\n          << \"at sample \" << point << \", next state dim: \" << 
S_IND_CLASS(DYN::DYN_PARAMS_T, YAW) << \" at time \" << t;\n      EXPECT_NEAR(state_der(S_IND_CLASS(DYN::DYN_PARAMS_T, BRAKE_STATE)), outputs[output_shift + 3], tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, BRAKE_STATE) << \" at time \"\n          << t;\n      EXPECT_NEAR(state_der(S_IND_CLASS(DYN::DYN_PARAMS_T, STEER_ANGLE_RATE)), outputs[output_shift + 4], tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, STEER_ANGLE_RATE)\n          << \" at time \" << t;\n\n      state_shift = point * traj_length * state_dim + t * state_dim;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, POS_X)), states[state_shift + 0], tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, POS_X) << \" at time \" << t;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, POS_Y)), states[state_shift + 1], tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, POS_Y) << \" at time \" << t;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, YAW)), angle_utils::normalizeAngle(states[state_shift + 2]),\n                  tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, YAW) << \" at time \" << t;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, VEL_X)), states[state_shift + 3], tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, VEL_X) << \" at time \" << t;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, OMEGA_Z)), states[state_shift + 5], tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, OMEGA_Z) << \" at time \"\n          << t;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, BRAKE_STATE)), states[state_shift + 6], tol)\n          << \"at sample \" << point << \", next 
state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, BRAKE_STATE) << \" at time \"\n          << t;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, STEER_ANGLE)), states[state_shift + 7], tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, STEER_ANGLE) << \" at time \"\n          << t;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, STEER_ANGLE_RATE)), states[state_shift + 8], tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, STEER_ANGLE_RATE)\n          << \" at time \" << t;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_POS_X)), states[state_shift + state_dim_raw],\n                  tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_POS_X)\n          << \" at time \" << t;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_POS_Y)),\n                  states[state_shift + state_dim_raw + 5], tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_POS_Y)\n          << \" at time \" << t;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_YAW)), states[state_shift + state_dim_raw + 10],\n                  tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_YAW)\n          << \" at time \" << t;\n      EXPECT_NEAR(next_state(S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_VEL_X)),\n                  states[state_shift + state_dim_raw + 15], tol)\n          << \"at sample \" << point << \", next state dim: \" << S_IND_CLASS(DYN::DYN_PARAMS_T, UNCERTAINTY_VEL_X)\n          << \" at time \" << t;\n\n      // checks init values for hidden and cell states\n      hidden_dim = dynamics.getHelper()->getLSTMModel()->getHiddenDim();\n      double* hidden = 
param_dict.at(\"pred/steering/hidden\").data<double>();\n      double* cell = param_dict.at(\"pred/steering/cell\").data<double>();\n      int hidden_shift = point * traj_length * hidden_dim + (t - 1) * hidden_dim;\n      for (int i = 0; i < hidden_dim; i++)\n      {\n        EXPECT_NEAR(dynamics.getHelper()->getLSTMModel()->getHiddenState()[i], hidden[hidden_shift + i], tol)\n            << \"steering hidden lstm at point \" << point << \" dim \" << i;\n        EXPECT_NEAR(dynamics.getHelper()->getLSTMModel()->getCellState()[i], cell[hidden_shift + i], tol)\n            << \"steering cell lstm at point \" << point << \" dim \" << i;\n      }\n\n      hidden_dim = dynamics.getMeanHelper()->getLSTMModel()->getHiddenDim();\n      hidden = param_dict.at(\"pred/terra/mean_network/hidden\").data<double>();\n      cell = param_dict.at(\"pred/terra/mean_network/cell\").data<double>();\n      hidden_shift = point * traj_length * hidden_dim + (t - 1) * hidden_dim;\n      for (int i = 0; i < hidden_dim; i++)\n      {\n        EXPECT_NEAR(dynamics.getMeanHelper()->getLSTMModel()->getHiddenState()[i], hidden[hidden_shift + i], tol)\n            << \"steering hidden lstm at point \" << point << \" dim \" << i;\n        EXPECT_NEAR(dynamics.getMeanHelper()->getLSTMModel()->getCellState()[i], cell[hidden_shift + i], tol)\n            << \"steering cell lstm at point \" << point << \" dim \" << i;\n      }\n\n      hidden_dim = dynamics.getUncertaintyHelper()->getLSTMModel()->getHiddenDim();\n      hidden = param_dict.at(\"pred/terra/uncertainty_network/hidden\").data<double>();\n      cell = param_dict.at(\"pred/terra/uncertainty_network/cell\").data<double>();\n      hidden_shift = point * traj_length * hidden_dim + (t - 1) * hidden_dim;\n      for (int i = 0; i < hidden_dim; i++)\n      {\n        EXPECT_NEAR(dynamics.getUncertaintyHelper()->getLSTMModel()->getHiddenState()[i], hidden[hidden_shift + i], tol)\n            << \"steering hidden lstm at point \" << point << \" dim 
\" << i;\n        EXPECT_NEAR(dynamics.getUncertaintyHelper()->getLSTMModel()->getCellState()[i], cell[hidden_shift + i], tol)\n            << \"steering cell lstm at point \" << point << \" dim \" << i;\n      }\n\n      state = next_state;\n\n      tol += 5.0e-4 / traj_length;\n    }\n  }\n}\n"
  },
  {
    "path": "tests/dynamics/racer_dubins_elevation_model_test.cu",
    "content": "#include <Eigen/Dense>\n#include <gtest/gtest.h>\n#include <mppi/dynamics/racer_dubins/racer_dubins_elevation.cuh>\n#include <kernel_tests/dynamics/dynamics_generic_kernel_tests.cuh>\n#include <mppi/ddp/ddp_model_wrapper.h>\n#include <cuda_runtime.h>\n\nclass RacerDubinsElevationTest : public ::testing::Test\n{\npublic:\n  cudaStream_t stream;\n\n  void SetUp() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n  }\n\n  void TearDown() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamDestroy(stream));\n  }\n};\n\nTEST_F(RacerDubinsElevationTest, Template)\n{\n  auto dynamics = RacerDubinsElevation();\n  EXPECT_EQ(19, RacerDubinsElevation::STATE_DIM);\n  EXPECT_EQ(2, RacerDubinsElevation::CONTROL_DIM);\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n}\n\nTEST_F(RacerDubinsElevationTest, BindStream)\n{\n  auto dynamics = RacerDubinsElevation(stream);\n\n  EXPECT_EQ(dynamics.stream_, stream) << \"Stream binding failure.\";\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n  EXPECT_EQ(dynamics.getTextureHelper()->stream_, stream);\n}\n\n/*\nfloat c_t = 1.3;\nfloat c_b = 2.5;\nfloat c_v = 3.7;\nfloat c_0 = 4.9;\nfloat wheel_base = 0.3;\n */\n\n// TEST_F(RacerDubinsElevationTest, ComputeDynamics)\n// {\n//   RacerDubinsElevation dynamics = RacerDubinsElevation();\n//   auto params = dynamics.getParams();\n//   RacerDubinsElevation::state_array x = RacerDubinsElevation::state_array::Zero();\n//   RacerDubinsElevation::control_array u = RacerDubinsElevation::control_array::Zero();\n\n//   // computeDynamics should not touch the roll/pitch element\n//   RacerDubinsElevation::state_array next_x = RacerDubinsElevation::state_array::Ones() * 0.153;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), 0);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   
EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 1, M_PI_2, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << 1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 2.6 - 4.7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), 1);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 1, M_PI_2, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 3.5 - 4.7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), 1);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -1, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << 1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 4.7 + 2.6 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), -1);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -1, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 3.5 + 4.7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), -1);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 7, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << 1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 3.9 - 5.7 * 7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   
EXPECT_FLOAT_EQ(next_x(2), 7);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -7, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << 1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 3.9 + 5.7 * 7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), -7);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 7, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 4.5 - 5.7 * 7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), 7);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -7, 0, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 4.5 + 5.7 * 7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), -7);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 1, M_PI_2, 0, 3, 0, 0.5, -0.5, 0.0, 0.0;\n//   u << 0, 1;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 4.7 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), (1 / .3) * tan(0));\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), 1);\n//   EXPECT_FLOAT_EQ(next_x(4), 5 * 0.6);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -1, M_PI_2, 0, 3, 5.0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, -1;\n//   dynamics.computeDynamics(x, u, 
next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 4.7 + 3.5 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), (-1 / .3) * tan(5.0 / -10.2));\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), -1);\n//   EXPECT_FLOAT_EQ(next_x(4), -5);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << -0.4, M_PI_2, 0, 3, 5.0, 0.5, -0.5, 0.0, 0.0;\n//   u << -1, -1;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 3.7 * 0.4 + 2.5 * 0.4 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), (-0.4 / .3) * tan(5.0 / -9.1));\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), -0.4);\n//   EXPECT_FLOAT_EQ(next_x(4), -5);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n\n//   x << 0.4, M_PI_2, 0, 3, 5.0, 0.5, -0.5, 0.0, 0.0;\n//   u << 0.1, -1;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 3.7 * 0.4 - sinf(-0.5) * -9.81);\n//   EXPECT_FLOAT_EQ(next_x(1), (0.4 / .3) * tan(5.0 / -9.1));\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), 0.4);\n//   EXPECT_FLOAT_EQ(next_x(4), -5);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n// }\n\n// TEST_F(RacerDubinsElevationTest, TestModelGPU)\n// {\n//   RacerDubinsElevation dynamics = RacerDubinsElevation();\n//   dynamics.GPUSetup();\n//   CudaCheckError();\n\n//   Eigen::Matrix<float, RacerDubinsElevation::CONTROL_DIM, 100> control_trajectory;\n//   control_trajectory = Eigen::Matrix<float, RacerDubinsElevation::CONTROL_DIM, 100>::Random();\n//   Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM, 100> state_trajectory;\n//   state_trajectory = Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM, 100>::Random();\n\n//   std::vector<std::array<float, 9>> s(100);\n//   std::vector<std::array<float, 9>> s_der(100);\n//   // steering, throttle\n//   std::vector<std::array<float, 2>> u(100);\n//   
for (int state_index = 0; state_index < s.size(); state_index++)\n//   {\n//     for (int dim = 0; dim < s[0].size(); dim++)\n//     {\n//       s[state_index][dim] = state_trajectory.col(state_index)(dim);\n//     }\n//     for (int dim = 0; dim < u[0].size(); dim++)\n//     {\n//       u[state_index][dim] = control_trajectory.col(state_index)(dim);\n//     }\n//   }\n\n//   // These variables will be changed so initialized to the right size only\n\n//   // Run dynamics on dynamicsU\n//   // Run dynamics on GPU\n//   for (int y_dim = 1; y_dim <= 4; y_dim++)\n//   {\n//     launchComputeDynamicsTestKernel<RacerDubinsElevation, 9, 2>(dynamics, s, u, s_der, y_dim);\n//     for (int point = 0; point < 100; point++)\n//     {\n//       RacerDubinsElevation::state_array state = state_trajectory.col(point);\n//       RacerDubinsElevation::control_array control = control_trajectory.col(point);\n//       RacerDubinsElevation::state_array state_der_cpu = RacerDubinsElevation::state_array::Zero();\n\n//       dynamics.computeDynamics(state, control, state_der_cpu);\n//       for (int dim = 0; dim < 6; dim++)\n//       {\n//         EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-5)\n//             << \"at point \" << point << \" dim \" << dim << \" with y_dim \" << y_dim;\n//         EXPECT_TRUE(isfinite(s_der[point][dim]));\n//       }\n//     }\n//   }\n\n//   dynamics.freeCudaMem();\n//   CudaCheckError();\n// }\n\n// TEST_F(RacerDubinsElevationTest, TestUpdateState)\n// {\n//   CudaCheckError();\n//   RacerDubinsElevation dynamics = RacerDubinsElevation();\n//   RacerDubinsElevation::state_array state;\n//   RacerDubinsElevation::state_array next_state;\n//   RacerDubinsElevation::state_array state_der;\n\n//   // TODO add in the elevation map\n\n//   state << 0, 0, 0, 0, 0, -0.5, 0.5;\n//   state_der << 1, 1, 1, 1, 1, 0, 0;\n//   dynamics.updateState(state, next_state, state_der, 0.1);\n//   EXPECT_TRUE(state_der != RacerDubinsElevation::state_array::Zero());\n//  
 EXPECT_FLOAT_EQ(next_state(0), 0.1);\n//   EXPECT_FLOAT_EQ(next_state(1), 0.1);\n//   EXPECT_FLOAT_EQ(next_state(2), 0.1);\n//   EXPECT_FLOAT_EQ(next_state(3), 0.1);\n//   EXPECT_FLOAT_EQ(next_state(4), 0.1);\n//   EXPECT_FLOAT_EQ(next_state(5), 0.0);\n//   EXPECT_FLOAT_EQ(next_state(6), 0.0);\n\n//   state << 0, M_PI - 0.1, 0, 0, 0, -0.5, 0.5;\n//   state_der << 1, 1, 1, 1, 1;\n//   dynamics.updateState(state, next_state, state_der, 1.0);\n//   EXPECT_TRUE(state_der != RacerDubinsElevation::state_array::Zero());\n//   EXPECT_FLOAT_EQ(next_state(0), 1.0);\n//   EXPECT_FLOAT_EQ(next_state(1), 1.0 - M_PI - 0.1);\n//   EXPECT_FLOAT_EQ(next_state(2), 1.0);\n//   EXPECT_FLOAT_EQ(next_state(3), 1.0);\n//   EXPECT_FLOAT_EQ(next_state(4), 0.5);\n//   EXPECT_FLOAT_EQ(next_state(5), 0.0);\n//   EXPECT_FLOAT_EQ(next_state(6), 0.0);\n\n//   state << 0, -M_PI + 0.1, 0, 0, 0, -0.5, 0.5;\n//   state_der << 1, -1, 1, 1, 1;\n//   dynamics.updateState(state, next_state, state_der, 1.0);\n//   EXPECT_TRUE(state_der != RacerDubinsElevation::state_array::Zero());\n//   EXPECT_FLOAT_EQ(next_state(0), 1.0);\n//   EXPECT_FLOAT_EQ(next_state(1), M_PI + 0.1 - 1.0);\n//   EXPECT_FLOAT_EQ(next_state(2), 1.0);\n//   EXPECT_FLOAT_EQ(next_state(3), 1.0);\n//   EXPECT_FLOAT_EQ(next_state(4), 0.5);\n//   EXPECT_FLOAT_EQ(next_state(5), 0.0);\n//   EXPECT_FLOAT_EQ(next_state(6), 0.0);\n\n//   CudaCheckError();\n// }\n\nTEST_F(RacerDubinsElevationTest, TestStep)\n{\n  CudaCheckError();\n  using DYN = RacerDubinsElevation;\n  const float tol = 1e-6;\n  DYN dynamics = DYN();\n  auto params = dynamics.getParams();\n  params.c_0 = 0;\n  params.c_b[0] = 1;\n  params.c_b[1] = 10;\n  params.c_b[2] = 100;\n  params.c_v[0] = 0.25;\n  params.c_v[1] = 0.5;\n  params.c_v[2] = 0.75;\n  params.c_t[0] = 2;\n  params.c_t[1] = 20;\n  params.c_t[2] = 200;\n  params.low_min_throttle = 0.2;\n  params.steer_command_angle_scale = 0.5;\n  params.steering_constant = 0.5;\n  params.wheel_base = 0.5;\n  params.max_steer_rate 
= 5;\n  dynamics.setParams(params);\n  DYN::state_array state;\n  DYN::state_array next_state;\n  DYN::state_array state_der = DYN::state_array::Zero();\n  DYN::control_array control;\n  DYN::output_array output;\n  float dt = 0.1;\n  // TODO add in the elevation map\n\n  // Basic initial state and no movement should stay still\n  state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 0, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_TRUE(state_der == DYN::state_array::Zero());\n  EXPECT_NEAR(next_state(0), 0.0, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.0, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 0.0, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 0.0, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), 0.0, tol);\n\n  // check the first index of throttle\n  state << 0.54, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 0.21, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -0.115, tol);\n  EXPECT_NEAR(next_state(0), 0.5285, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.054, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 0.0, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 0.0, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -0.115, tol);\n\n  // check the start of second index of throttle\n  state << 0.56, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 0.01, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -0.08, tol);\n  EXPECT_NEAR(next_state(0), 0.552, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 
0.056, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 0.0, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 0.0, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -0.08, tol);\n\n  // check the start of second index of throttle with different dt\n  state << 0.56, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 0.21, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, 0.2);\n  EXPECT_NEAR(state_der(0), -0.12, tol);\n  EXPECT_NEAR(next_state(0), 0.536, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.112, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 0.0, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 0.0, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -0.12, tol);\n\n  // check the end of second index of throttle\n  state << 2.99, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 0.01, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -1.295, tol);\n  EXPECT_NEAR(next_state(0), 2.8605, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.299, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 0.0, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 0.0, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -1.295, tol);\n\n  // check the end of second index of throttle\n  state << 3.01, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 0.01, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -0.2575, tol);\n  EXPECT_NEAR(next_state(0), 
2.98425, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.301, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 0.0, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 0.0, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -0.2575, tol);\n\n  // Apply full throttle from zero state\n  state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), 1.6, tol);\n  EXPECT_NEAR(next_state(0), 0.16, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.0, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 0.0, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 0.0, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), 1.6, tol);\n\n  // Apply throttle to a state with positive velocity\n  state << 1, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), 5.5, tol);\n  EXPECT_NEAR(next_state(0), 1.55, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.1, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 0.0, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 0.0, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), 5.5, tol);\n\n  // Apply full throttle and half left turn to origin state\n  state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), 
1.6, tol);\n  EXPECT_NEAR(next_state(0), 0.16, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.0, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), powf(0.5, 3) * dt, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), powf(0.5, 3), tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), 1.6, tol);\n\n  // Apply full throttle and half left turn to a moving state oriented 30 degrees to the left\n  float yaw = M_PI / 6;\n  state << 1.0, yaw, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), 5.5, tol);\n  EXPECT_NEAR(next_state(0), 1.55, tol);\n  EXPECT_NEAR(next_state(1), yaw, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), powf(0.5, 3) * dt, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), powf(0.5, 3), tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), 5.5, tol);\n\n  // Apply full throttle and half left turn to a moving state oriented 30 degrees to the left which is already turning\n  float steer_angle = M_PI / 8;\n  state << 1.0, yaw, 0, 0, steer_angle, -0.0, 0.0, 0, 0;\n  control << 1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), 5.5, tol);\n  EXPECT_NEAR(state_der(1), -0.086361105, tol);\n  EXPECT_NEAR(next_state(0), 1.55, tol);\n  EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n  
EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), (0.25 - steer_angle) * 0.5, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), 5.5, tol);\n\n  // Apply full brake and half left turn to a moving state oriented 30 degrees to the left which is already turning\n  state << 1.0, yaw, 0, 0, steer_angle, 1.0, 0.0, 0, 0;\n  control << -1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -5.5, tol);\n  EXPECT_NEAR(next_state(0), 1 - 5.5 * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), (0.25 - steer_angle) * 0.5, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -5.5, tol);\n\n  /**\n   * Apply full brake and half left turn to a moving state oriented 30 degrees to the left which is already turning\n   * and on a downward facing hill\n   */\n  float pitch = 20 * M_PI / 180;\n  state << 1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n  control << -1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), 1 + (-5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), (0.25 - steer_angle) 
* 0.5, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), (-5.5 + 9.81 * sinf(pitch)), tol);\n\n  /**\n   * Apply full brake and half left turn to a backwards moving state oriented 30 degrees to the left which is already\n   * turning and on a downward facing hill\n   */\n  state << -1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n  control << -1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  EXPECT_NEAR(state_der(1), 0.086361105, tol);\n  EXPECT_NEAR(next_state(1), yaw + 0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), -1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), (0.25 - steer_angle) * 0.5, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), (5.5 + 9.81 * sinf(pitch)), tol);\n\n  /**\n   * Apply full brake and half right turn to a backwards moving state oriented 30 degrees to the left which is already\n   * turning and on a downward facing hill\n   */\n  state << -1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n  control << -1, -0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + 0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), -1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + (-0.25 - steer_angle) * 0.5 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), (-0.25 - steer_angle) * 0.5, tol);\n  
EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), (5.5 + 9.81 * sinf(pitch)), tol);\n\n  /**\n   * Apply full brake and half right turn to a backwards moving state with a huge steering angle to test max steer\n   * angle and steering rate. We are also on a downward facing hill and are already oriented 30 degrees to the left\n   */\n  steer_angle *= 100;\n  state << -1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n  control << -1, -0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + tan(steer_angle / -9.1) * dt * -2, tol);\n  EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), -1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), params.max_steer_angle, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), -params.max_steer_rate, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), (5.5 + 9.81 * sinf(pitch)), tol);\n}\n\nTEST_F(RacerDubinsElevationTest, TestStepGPUvsCPU)\n{\n  const int num_rollouts = 1000;\n  const float dt = 0.1f;\n  CudaCheckError();\n  RacerDubinsElevation dynamics = RacerDubinsElevation();\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  helper->setExtent(0, extent);\n\n  std::vector<float> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = i * 1.0f;\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, make_float3(1, 2, 3));\n\n  helper->updateTexture(0, data_vec);\n  helper->updateResolution(0, 10);\n  helper->enableTexture(0);\n  
helper->copyToDevice(true);\n\n  CudaCheckError();\n  dynamics.GPUSetup();\n  CudaCheckError();\n\n  Eigen::Matrix<float, RacerDubinsElevation::CONTROL_DIM, num_rollouts> control_trajectory;\n  control_trajectory = Eigen::Matrix<float, RacerDubinsElevation::CONTROL_DIM, num_rollouts>::Random();\n  Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM, num_rollouts> state_trajectory;\n  state_trajectory = Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM, num_rollouts>::Random();\n\n  std::vector<std::array<float, RacerDubinsElevation::STATE_DIM>> s(num_rollouts);\n  std::vector<std::array<float, RacerDubinsElevation::STATE_DIM>> s_next(num_rollouts);\n  std::vector<std::array<float, RacerDubinsElevation::STATE_DIM>> s_der(num_rollouts);\n  // steering, throttle\n  std::vector<std::array<float, RacerDubinsElevation::CONTROL_DIM>> u(num_rollouts);\n\n  RacerDubinsElevation::state_array state;\n  RacerDubinsElevation::state_array next_state_cpu;\n  RacerDubinsElevation::control_array control;\n  RacerDubinsElevation::output_array output;\n  RacerDubinsElevation::state_array state_der_cpu = RacerDubinsElevation::state_array::Zero();\n\n  // Run dynamics on dynamicsU\n  // Run dynamics on GPU\n  for (int y_dim = 1; y_dim <= 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < s[0].size(); dim++)\n      {\n        s[state_index][dim] = state_trajectory.col(state_index)(dim);\n      }\n      for (int dim = 0; dim < u[0].size(); dim++)\n      {\n        u[state_index][dim] = control_trajectory.col(state_index)(dim);\n      }\n    }\n\n    launchStepTestKernel<RacerDubinsElevation>(dynamics, s, u, s_der, s_next, 0, dt, y_dim);\n    for (int point = 0; point < num_rollouts; point++)\n    {\n      state = state_trajectory.col(point);\n      control = control_trajectory.col(point);\n      state_der_cpu = RacerDubinsElevation::state_array::Zero();\n\n      dynamics.step(state, next_state_cpu, 
state_der_cpu, control, output, 0, dt);\n      for (int dim = 0; dim < RacerDubinsElevation::STATE_DIM; dim++)\n      {\n        EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        // EXPECT_NEAR(state(dim), s[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        EXPECT_NEAR(next_state_cpu(dim), s_next[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(s_next[point][dim]));\n      }\n    }\n  }\n  dynamics.freeCudaMem();\n}\n\nTEST_F(RacerDubinsElevationTest, TestStepReverse)\n{\n  CudaCheckError();\n  using DYN = RacerDubinsElevation;\n  const float tol = 1e-6;\n  DYN dynamics = DYN();\n  auto params = dynamics.getParams();\n  params.c_0 = 0;\n  params.c_b[0] = 1;\n  params.c_b[1] = 10;\n  params.c_b[2] = 100;\n  params.c_v[0] = 0.25;\n  params.c_v[1] = 0.5;\n  params.c_v[2] = 0.75;\n  params.c_t[0] = 2;\n  params.c_t[1] = 20;\n  params.c_t[2] = 200;\n  params.low_min_throttle = 0.2;\n  params.steer_command_angle_scale = 0.5;\n  params.steering_constant = 0.5;\n  params.wheel_base = 0.5;\n  params.max_steer_rate = 5;\n  params.gear_sign = -1;\n  dynamics.setParams(params);\n  DYN::state_array state;\n  DYN::state_array next_state;\n  DYN::state_array state_der = DYN::state_array::Zero();\n  DYN::control_array control;\n  DYN::output_array output;\n  float dt = 0.1;\n  // TODO add in the elevation map\n\n  // Basic initial state and no movement should stay still\n  state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 0, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_TRUE(state_der == DYN::state_array::Zero());\n  EXPECT_NEAR(next_state(0), 0.0, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.0, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 0.0, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  
EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 0.0, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), 0.0, tol);\n\n  // Apply full throttle from zero state\n  state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -1.6, tol);\n  EXPECT_NEAR(next_state(0), -0.16, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.0, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 0.0, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 0.0, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -1.6, tol);\n\n  // Apply throttle to a state with positive velocity\n  state << 1, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -5.5, tol);\n  EXPECT_NEAR(next_state(0), 0.45, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.1, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), 0.0, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), 0.0, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -5.5, tol);\n\n  // Apply full throttle and half left turn to origin state\n  state << 0, 0, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -1.6, tol);\n  EXPECT_NEAR(next_state(0), -0.16, tol);\n  EXPECT_NEAR(next_state(1), 0.0, tol);\n  EXPECT_NEAR(next_state(2), 0.0, tol);\n  EXPECT_NEAR(next_state(3), 0.0, tol);\n  EXPECT_NEAR(next_state(4), powf(0.5, 3) * dt, 
tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), powf(0.5, 3), tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -1.6, tol);\n\n  // Apply full throttle and half left turn to a moving state oriented 30 degrees to the left\n  float yaw = M_PI / 6;\n  state << 1.0, yaw, 0, 0, 0, -0.0, 0.0, 0, 0;\n  control << 1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -5.5, tol);\n  EXPECT_NEAR(next_state(0), 0.45, tol);\n  EXPECT_NEAR(next_state(1), yaw, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), powf(0.5, 3) * dt, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), powf(0.5, 3), tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -5.5, tol);\n\n  // Apply full throttle and half left turn to a moving state oriented 30 degrees to the left which is already turning\n  float steer_angle = M_PI / 8;\n  state << 1.0, yaw, 0, 0, steer_angle, -0.0, 0.0, 0, 0;\n  control << 1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -5.5, tol);\n  EXPECT_NEAR(next_state(0), 0.45, tol);\n  EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n  EXPECT_NEAR(next_state(5), 0.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), (0.25 - steer_angle) * 0.5, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -5.5, tol);\n\n  // Apply full brake and half 
left turn to a moving state oriented 30 degrees to the left which is already turning\n  state << 1.0, yaw, 0, 0, steer_angle, 1.0, 0, 0.0, 0, 0;\n  control << -1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(state_der(0), -5.5, tol);\n  EXPECT_NEAR(next_state(0), 1 - 5.5 * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), (0.25 - steer_angle) * 0.5, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), -5.5, tol);\n\n  /**\n   * Apply full brake and half left turn to a moving state oriented 30 degrees to the left which is already turning\n   * and on a downward facing hill\n   */\n  float pitch = 20 * M_PI / 180;\n  state << 1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n  control << -1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), 1 + (-5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + -0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), 1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), 1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), (0.25 - steer_angle) * 0.5, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), (-5.5 + 9.81 * sinf(pitch)), tol);\n\n  /**\n   * Apply full brake and half left turn to a backwards moving state oriented 30 degrees to the left which is already\n   * turning and on a downward facing hill\n   
*/\n  state << -1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n  control << -1, 0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + 0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), -1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + (0.25 - steer_angle) * 0.5 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), (0.25 - steer_angle) * 0.5, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), (5.5 + 9.81 * sinf(pitch)), tol);\n\n  /**\n   * Apply full brake and half right turn to a backwards moving state oriented 30 degrees to the left which is already\n   * turning and on a downward facing hill\n   */\n  state << -1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n  control << -1, -0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + 0.086361105 * dt, tol);\n  EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), -1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), steer_angle + (-0.25 - steer_angle) * 0.5 * dt, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), (-0.25 - steer_angle) * 0.5, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), (5.5 + 9.81 * sinf(pitch)), tol);\n\n  /**\n   * Apply full brake and half right turn to a backwards moving state with a huge steering angle to test max steer\n   * angle and steering rate. 
We are also on a downward facing hill and are already oriented 30 degrees to the left\n   */\n  steer_angle *= 100;\n  state << -1.0, yaw, 0, 0, steer_angle, 1.0, 0, pitch, 0, 0;\n  control << -1, -0.5;\n  dynamics.step(state, next_state, state_der, control, output, 0, dt);\n  EXPECT_NEAR(next_state(0), -1 + (5.5 + 9.81 * sinf(pitch)) * dt, tol);\n  EXPECT_NEAR(next_state(1), yaw + tan(steer_angle / -9.1) * dt * -2, tol);\n  EXPECT_NEAR(next_state(2), -1 * cos(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(3), -1 * sin(yaw) * dt, tol);\n  EXPECT_NEAR(next_state(4), params.max_steer_angle, tol);\n  EXPECT_NEAR(next_state(5), 1.0, tol);\n  EXPECT_NEAR(next_state(6), 0.0, tol);\n  EXPECT_NEAR(next_state(7), 0.0, tol);\n  EXPECT_NEAR(next_state(8), -params.max_steer_rate, tol);\n  EXPECT_NEAR(output(O_IND_CLASS(DYN::DYN_PARAMS_T, ACCEL_X)), (5.5 + 9.81 * sinf(pitch)), tol);\n}\n\nTEST_F(RacerDubinsElevationTest, TestStepGPUvsCPUReverse)\n{\n  const int num_rollouts = 1000;\n  const float dt = 0.1f;\n  CudaCheckError();\n  RacerDubinsElevation dynamics = RacerDubinsElevation();\n  auto params = dynamics.getParams();\n  params.gear_sign = -1;\n  dynamics.setParams(params);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  helper->setExtent(0, extent);\n\n  std::vector<float> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = i * 1.0f;\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, make_float3(1, 2, 3));\n\n  helper->updateTexture(0, data_vec);\n  helper->updateResolution(0, 10);\n  helper->enableTexture(0);\n  helper->copyToDevice(true);\n\n  CudaCheckError();\n  dynamics.GPUSetup();\n  CudaCheckError();\n\n  Eigen::Matrix<float, 
RacerDubinsElevation::CONTROL_DIM, num_rollouts> control_trajectory;\n  control_trajectory = Eigen::Matrix<float, RacerDubinsElevation::CONTROL_DIM, num_rollouts>::Random();\n  Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM, num_rollouts> state_trajectory;\n  state_trajectory = Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM, num_rollouts>::Random();\n\n  std::vector<std::array<float, RacerDubinsElevation::STATE_DIM>> s(num_rollouts);\n  std::vector<std::array<float, RacerDubinsElevation::STATE_DIM>> s_next(num_rollouts);\n  std::vector<std::array<float, RacerDubinsElevation::STATE_DIM>> s_der(num_rollouts);\n  // steering, throttle\n  std::vector<std::array<float, RacerDubinsElevation::CONTROL_DIM>> u(num_rollouts);\n\n  RacerDubinsElevation::state_array state;\n  RacerDubinsElevation::state_array next_state_cpu;\n  RacerDubinsElevation::control_array control;\n  RacerDubinsElevation::output_array output;\n  RacerDubinsElevation::state_array state_der_cpu = RacerDubinsElevation::state_array::Zero();\n\n  // Run dynamics on dynamicsU\n  // Run dynamics on GPU\n  for (int y_dim = 1; y_dim <= 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < s[0].size(); dim++)\n      {\n        s[state_index][dim] = state_trajectory.col(state_index)(dim);\n      }\n      for (int dim = 0; dim < u[0].size(); dim++)\n      {\n        u[state_index][dim] = control_trajectory.col(state_index)(dim);\n      }\n    }\n\n    launchStepTestKernel<RacerDubinsElevation>(dynamics, s, u, s_der, s_next, 0, dt, y_dim);\n    for (int point = 0; point < num_rollouts; point++)\n    {\n      state = state_trajectory.col(point);\n      control = control_trajectory.col(point);\n      state_der_cpu = RacerDubinsElevation::state_array::Zero();\n\n      dynamics.step(state, next_state_cpu, state_der_cpu, control, output, 0, dt);\n      for (int dim = 0; dim < RacerDubinsElevation::STATE_DIM; dim++)\n      {\n        
EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        // EXPECT_NEAR(state(dim), s[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        EXPECT_NEAR(next_state_cpu(dim), s_next[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(s_next[point][dim]));\n      }\n    }\n  }\n  dynamics.freeCudaMem();\n}\n\nTEST_F(RacerDubinsElevationTest, ComputeStateTrajectoryFiniteTest)\n{\n  RacerDubinsElevation dynamics = RacerDubinsElevation();\n  using PARAMS = RacerDubinsElevation::DYN_PARAMS_T;\n  PARAMS params;\n  params.c_t[0] = 3.0;\n  params.c_b[0] = 0.2;\n  params.c_v[0] = 0.2;\n  params.c_0 = 0.2;\n  params.wheel_base = 3.0;\n  params.steering_constant = 1.0;\n  dynamics.setParams(params);\n\n  Eigen::Matrix<float, RacerDubinsElevation::CONTROL_DIM, 500> control_trajectory;\n  control_trajectory = Eigen::Matrix<float, RacerDubinsElevation::CONTROL_DIM, 500>::Zero();\n  Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM, 500> state_trajectory;\n  state_trajectory = Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM, 500>::Zero();\n  RacerDubinsElevation::state_array state_der;\n  RacerDubinsElevation::state_array x, x_next;\n  RacerDubinsElevation::output_array output;\n  x << 0, 1.46919e-6, 0.0140179, 1.09739e-8, -0.000735827;\n\n  for (int i = 0; i < 500; i++)\n  {\n    RacerDubinsElevation::control_array u = control_trajectory.col(i);\n    dynamics.step(x, x_next, state_der, u, output, i, 0.02);\n    dynamics.computeDynamics(x, u, state_der);\n    EXPECT_TRUE(x.allFinite());\n    EXPECT_TRUE(x_next.allFinite());\n    EXPECT_TRUE(state_der.allFinite());\n    EXPECT_TRUE(u.allFinite());\n    EXPECT_TRUE(state_der != RacerDubinsElevation::state_array::Zero());\n    x = x_next;\n  }\n  params.steering_constant = 0.5;\n  dynamics.setParams(params);\n\n  x << 0, 1.46919e-6, 0.0140179, 1.09739e-8, -1.0;\n  for (int i = 0; i 
< 500; i++)\n  {\n    RacerDubinsElevation::control_array u = control_trajectory.col(i);\n    dynamics.step(x, x_next, state_der, u, output, i, 0.02);\n    dynamics.computeDynamics(x, u, state_der);\n    EXPECT_TRUE(x.allFinite());\n    EXPECT_TRUE(x_next.allFinite());\n    EXPECT_TRUE(state_der.allFinite());\n    EXPECT_TRUE(u.allFinite());\n    EXPECT_TRUE(state_der != RacerDubinsElevation::state_array::Zero());\n    x = x_next;\n  }\n}\n\nclass LinearDummy : public RacerDubinsElevation\n{\npublic:\n  bool computeGrad(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A, Eigen::Ref<dfdu> B)\n  {\n    return false;\n  };\n};\n\nTEST_F(RacerDubinsElevationTest, TestComputeGradComputation)\n{\n  GTEST_SKIP();\n  Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM,\n                RacerDubinsElevation::STATE_DIM + RacerDubinsElevation::CONTROL_DIM>\n      numeric_jac;\n  Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM,\n                RacerDubinsElevation::STATE_DIM + RacerDubinsElevation::CONTROL_DIM>\n      analytic_jac;\n\n  const int num_rollouts = 100;\n  Eigen::Matrix<float, RacerDubinsElevation::CONTROL_DIM, num_rollouts> control_trajectory;\n  control_trajectory = Eigen::Matrix<float, RacerDubinsElevation::CONTROL_DIM, num_rollouts>::Random();\n  Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM, num_rollouts> state_trajectory;\n  state_trajectory = Eigen::Matrix<float, RacerDubinsElevation::STATE_DIM, num_rollouts>::Random();\n\n  // TODO properly scale the random values\n\n  auto analytic_grad_model = RacerDubinsElevation();\n\n  RacerDubinsElevation::dfdx A_analytic = RacerDubinsElevation::dfdx::Zero();\n  RacerDubinsElevation::dfdu B_analytic = RacerDubinsElevation::dfdu::Zero();\n\n  auto numerical_grad_model = LinearDummy();\n\n  std::shared_ptr<ModelWrapperDDP<LinearDummy>> ddp_model =\n      
std::make_shared<ModelWrapperDDP<LinearDummy>>(&numerical_grad_model);\n\n  auto params = analytic_grad_model.getParams();\n  // params.c_t[0] = 3.0;\n  // params.c_b[0] = 0.2;\n  // params.c_v[0] = 0.2;\n  params.c_0 = 0.0;\n  params.wheel_base = 3.0;\n  params.steering_constant = 1.1;\n  params.low_min_throttle = 0.0;\n  params.max_brake_rate_pos = 10.0;\n  analytic_grad_model.setParams(params);\n  numerical_grad_model.setParams(params);\n\n  auto limits = analytic_grad_model.getControlRanges();\n  limits[0].x = -0.3;\n  limits[0].y = 1.0;\n  limits[1].x = -1.0;\n  limits[1].y = 1.0;\n  analytic_grad_model.setControlRanges(limits);\n  numerical_grad_model.setControlRanges(limits);\n\n  state_trajectory.col(0) << 5, M_PI_4, 1, 1, 3, 0, 0, 0, 2;\n  control_trajectory.col(0) << 0.5, 1;\n\n  state_trajectory.col(1) << 5, M_PI_4, 1, 1, 3, 0, 0, 0, 2;\n  control_trajectory.col(1) << -1.0, 1;\n\n  // state_trajectory.col(5) = state_trajectory.col(5).cwiseAbs();\n\n  for (int i = 0; i < num_rollouts; i++)\n  {\n    RacerDubinsElevation::state_array state = state_trajectory.col(i);\n    RacerDubinsElevation::control_array control = control_trajectory.col(i);\n\n    if (abs(state(0)) < 1)\n    {\n      state(0) = state(0) * 10;\n      state(1) = state(1) * M_PI;\n      state(2) = state(2) * 100;\n      state(3) = state(3) * 100;\n      state(4) = state(4) * 5;\n      state(6) = state(6) * M_PI_2;\n      state(7) = state(7) * M_PI_2;\n      state(8) = state(8) * 10;\n    }\n    state(5) = min(abs(state(5) / 0.33f), 0.3f);\n\n    // std::cout << \"iteration \" << i << std::endl;\n    // std::cout << \"state: \" << state.transpose() << std::endl;\n    // std::cout << \"control: \" << control.transpose() << std::endl;\n\n    bool analytic_grad = analytic_grad_model.computeGrad(state, control, A_analytic, B_analytic);\n    EXPECT_TRUE(analytic_grad);\n\n    analytic_jac.leftCols<RacerDubinsElevation::STATE_DIM>() = A_analytic;\n    
analytic_jac.rightCols<RacerDubinsElevation::CONTROL_DIM>() = B_analytic;\n    numeric_jac = ddp_model->df(state, control);\n\n    EXPECT_LT((numeric_jac - analytic_jac).norm(), 5e-2)\n        << \"at index \" << i << \"\\nstate: \" << state.transpose() << \"\\ncontrol \" << control.transpose()\n        << \"\\nNumeric Jacobian\\n\"\n        << numeric_jac << \"\\nAnalytic Jacobian\\n\"\n        << analytic_jac;\n  }\n}\n"
  },
  {
    "path": "tests/dynamics/racer_dubins_elevation_suspension_test.cu",
    "content": "#include <Eigen/Dense>\n#include <gtest/gtest.h>\n#include <mppi/dynamics/racer_dubins/racer_dubins_elevation_suspension_lstm.cuh>\n#include <kernel_tests/dynamics/dynamics_generic_kernel_tests.cuh>\n#include <mppi/ddp/ddp_model_wrapper.h>\n#include <racer_test_networks.h>\n#include <cuda_runtime.h>\n\nclass RacerDubinsElevationSuspensionTest : public ::testing::Test\n{\npublic:\n  cudaStream_t stream;\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  float map_resolution = 10.0f;          // [m / px]\n  float3 origin = make_float3(1, 2, 3);  // [m, m, m]\n\n  std::vector<float> data_vec;\n  std::vector<float4> normal_vec;\n\n  void SetUp() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n    data_vec.resize(extent.width * extent.height);\n    normal_vec.resize(extent.width * extent.height);\n    for (int i = 0; i < data_vec.size(); i++)\n    {\n      data_vec[i] = i * 1.0f;\n      Eigen::Vector4f random_normal = Eigen::Vector4f::Random();\n      normal_vec[i] = make_float4(random_normal.x(), random_normal.y(), random_normal.z(), random_normal.w());\n    }\n  }\n\n  void TearDown() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamDestroy(stream));\n  }\n};\n\nTEST_F(RacerDubinsElevationSuspensionTest, Template)\n{\n  using DYNAMICS = RacerDubinsElevationSuspension;\n  DYNAMICS dynamics = DYNAMICS(mppi::tests::steering_lstm, stream);\n  EXPECT_EQ(24, DYNAMICS::STATE_DIM);\n  EXPECT_EQ(2, DYNAMICS::CONTROL_DIM);\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n}\n\nTEST_F(RacerDubinsElevationSuspensionTest, TestStepGPUvsCPU)\n{\n  const int num_rollouts = 1000;\n  const float dt = 0.1f;\n  CudaCheckError();\n  using DYN = RacerDubinsElevationSuspension;\n  DYN dynamics = DYN(mppi::tests::steering_lstm, stream);\n  EXPECT_FLOAT_EQ(dynamics.getParams().max_steer_rate, 17.590296);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steering_constant, 3.286375);\n  
EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_constant, 9.301527);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_drag_constant, -0.60327667);\n\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  helper->setExtent(0, extent);\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, origin);\n\n  helper->updateTexture(0, data_vec);\n  helper->updateResolution(0, map_resolution);\n  helper->enableTexture(0);\n  helper->copyToDevice(true);\n\n  CudaCheckError();\n  dynamics.GPUSetup();\n  CudaCheckError();\n\n  EXPECT_NE(dynamics.getHelper()->getLSTMDevicePtr(), nullptr);\n  EXPECT_NE(dynamics.network_d_, nullptr);\n  EXPECT_EQ(dynamics.network_d_, dynamics.getHelper()->getLSTMDevicePtr());\n\n  DYN::buffer_trajectory buffer;\n  buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_CMD\"] = Eigen::VectorXf::Random(51);\n\n  checkGPUComputationStep<RacerDubinsElevationSuspension>(dynamics, dt, 16, 32, buffer, 1.0e-4);\n}\n\nTEST_F(RacerDubinsElevationSuspensionTest, TestStepGPUvsCPUWithPartialElevationMap)\n{\n  const int num_rollouts = 1000;\n  const float dt = 0.1f;\n  CudaCheckError();\n  using DYN = RacerDubinsElevationSuspension;\n  DYN dynamics = DYN(mppi::tests::steering_lstm, stream);\n  EXPECT_FLOAT_EQ(dynamics.getParams().max_steer_rate, 17.590296);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steering_constant, 3.286375);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_constant, 9.301527);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_drag_constant, -0.60327667);\n\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    if (i < 10)\n    {\n      data_vec[i] = NAN;\n    }\n  }\n  TwoDTextureHelper<float>* helper = 
dynamics.getTextureHelper();\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, origin);\n  helper->updateTexture(0, data_vec, extent);\n  helper->updateResolution(0, map_resolution);\n  helper->enableTexture(0);\n  helper->copyToDevice(true);\n\n  CudaCheckError();\n  dynamics.GPUSetup();\n  CudaCheckError();\n\n  EXPECT_NE(dynamics.getHelper()->getLSTMDevicePtr(), nullptr);\n  EXPECT_NE(dynamics.network_d_, nullptr);\n  EXPECT_EQ(dynamics.network_d_, dynamics.getHelper()->getLSTMDevicePtr());\n\n  DYN::buffer_trajectory buffer;\n  buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_CMD\"] = Eigen::VectorXf::Random(51);\n\n  checkGPUComputationStep<RacerDubinsElevationSuspension>(dynamics, dt, 16, 32, buffer, 1.0e-4);\n}\n\nTEST_F(RacerDubinsElevationSuspensionTest, TestStepGPUvsCPUWithPartialElevationAndNormalMap)\n{\n  const int num_rollouts = 1000;\n  const float dt = 0.1f;\n  CudaCheckError();\n  using DYN = RacerDubinsElevationSuspension;\n  DYN dynamics = DYN(mppi::tests::steering_lstm, stream);\n  EXPECT_FLOAT_EQ(dynamics.getParams().max_steer_rate, 17.590296);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steering_constant, 3.286375);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_constant, 9.301527);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_drag_constant, -0.60327667);\n\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    if (i < 10)\n    {\n      data_vec[i] = NAN;\n      normal_vec[i] = make_float4(NAN, NAN, NAN, NAN);\n    }\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n\n  
TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  TwoDTextureHelper<float4>* normal_helper = dynamics.getTextureHelperNormals();\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, origin);\n  helper->updateTexture(0, data_vec, extent);\n  helper->updateResolution(0, map_resolution);\n  helper->enableTexture(0);\n  helper->copyToDevice(false);\n  normal_helper->updateRotation(0, new_rot_mat);\n  normal_helper->updateOrigin(0, origin);\n  normal_helper->updateTexture(0, normal_vec, extent);\n  normal_helper->updateResolution(0, map_resolution);\n  normal_helper->enableTexture(0);\n  normal_helper->copyToDevice(true);\n\n  CudaCheckError();\n  dynamics.GPUSetup();\n  CudaCheckError();\n\n  EXPECT_NE(dynamics.getHelper()->getLSTMDevicePtr(), nullptr);\n  EXPECT_NE(dynamics.network_d_, nullptr);\n  EXPECT_EQ(dynamics.network_d_, dynamics.getHelper()->getLSTMDevicePtr());\n\n  DYN::buffer_trajectory buffer;\n  buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_CMD\"] = Eigen::VectorXf::Random(51);\n\n  checkGPUComputationStep<RacerDubinsElevationSuspension>(dynamics, dt, 16, 32, buffer, 1.0e-4);\n}\n\nTEST_F(RacerDubinsElevationSuspensionTest, TestStepGPUvsCPUOutput)\n{\n  const float dt = 0.1f;\n  CudaCheckError();\n  using DYN = RacerDubinsElevationSuspension;\n  DYN dynamics = DYN(mppi::tests::steering_lstm, stream);\n  EXPECT_FLOAT_EQ(dynamics.getParams().max_steer_rate, 17.590296);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steering_constant, 3.286375);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_constant, 9.301527);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_drag_constant, -0.60327667);\n\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  helper->setExtent(0, extent);\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  
new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, origin);\n\n  helper->updateTexture(0, data_vec);\n  helper->updateResolution(0, map_resolution);\n  helper->enableTexture(0);\n  helper->copyToDevice(true);\n\n  CudaCheckError();\n  dynamics.GPUSetup();\n  CudaCheckError();\n\n  DYN::buffer_trajectory buffer;\n  buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_CMD\"] = Eigen::VectorXf::Random(51);\n\n  checkGPUComputationStep<RacerDubinsElevationSuspension>(dynamics, dt, 16, 32, buffer, 1.0e-4);\n}\n\nTEST_F(RacerDubinsElevationSuspensionTest, TestStepGPUvsCPUWithPartialElevationAndNormalMapCheckOutput)\n{\n  const float dt = 0.1f;\n  CudaCheckError();\n  using DYN = RacerDubinsElevationSuspension;\n  DYN dynamics = DYN(mppi::tests::steering_lstm, stream);\n  EXPECT_FLOAT_EQ(dynamics.getParams().max_steer_rate, 17.590296);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steering_constant, 3.286375);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_constant, 9.301527);\n  EXPECT_FLOAT_EQ(dynamics.getParams().steer_accel_drag_constant, -0.60327667);\n\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    if (i < 10)\n    {\n      data_vec[i] = NAN;\n      normal_vec[i] = make_float4(NAN, NAN, NAN, NAN);\n    }\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n\n  TwoDTextureHelper<float>* helper = dynamics.getTextureHelper();\n  TwoDTextureHelper<float4>* normal_helper = dynamics.getTextureHelperNormals();\n  helper->updateRotation(0, new_rot_mat);\n  helper->updateOrigin(0, origin);\n  helper->updateTexture(0, data_vec, extent);\n  helper->updateResolution(0, map_resolution);\n  helper->enableTexture(0);\n  
helper->copyToDevice(false);\n  normal_helper->updateRotation(0, new_rot_mat);\n  normal_helper->updateOrigin(0, origin);\n  normal_helper->updateTexture(0, normal_vec, extent);\n  normal_helper->updateResolution(0, map_resolution);\n  normal_helper->enableTexture(0);\n  normal_helper->copyToDevice(true);\n\n  CudaCheckError();\n  dynamics.GPUSetup();\n  CudaCheckError();\n\n  DYN::buffer_trajectory buffer;\n  buffer[\"VEL_X\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_ANGLE_RATE\"] = Eigen::VectorXf::Random(51);\n  buffer[\"STEER_CMD\"] = Eigen::VectorXf::Random(51);\n\n  checkGPUComputationStep<RacerDubinsElevationSuspension>(dynamics, dt, 16, 32, buffer, 1.0e-4);\n}\n"
  },
  {
    "path": "tests/dynamics/racer_dubins_model_test.cu",
    "content": "#include <Eigen/Dense>\n#include <gtest/gtest.h>\n#include <mppi/dynamics/racer_dubins/racer_dubins.cuh>\n#include <kernel_tests/dynamics/dynamics_generic_kernel_tests.cuh>\n#include <mppi/ddp/ddp_model_wrapper.h>\n#include <cuda_runtime.h>\n\nTEST(RacerDubins, Template)\n{\n  auto dynamics = RacerDubins();\n  EXPECT_EQ(7, RacerDubins::STATE_DIM);\n  EXPECT_EQ(2, RacerDubins::CONTROL_DIM);\n}\n\nTEST(RacerDubins, BindStream)\n{\n  cudaStream_t stream;\n\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  auto dynamics = RacerDubins(stream);\n\n  EXPECT_EQ(dynamics.stream_, stream) << \"Stream binding failure.\";\n\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n}\n\n/*\nfloat c_t = 1.3;\nfloat c_b = 2.5;\nfloat c_v = 3.7;\nfloat c_0 = 4.9;\nfloat wheel_base = 0.3;\n */\n\nTEST(RacerDubins, ComputeDynamics)\n{\n  RacerDubins dynamics = RacerDubins();\n  RacerDubinsParams params = dynamics.getParams();\n  RacerDubins::state_array x = RacerDubins::state_array::Zero();\n  RacerDubins::control_array u = RacerDubins::control_array::Zero();\n\n  RacerDubins::state_array next_x = RacerDubins::state_array::Zero();\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 4.9);\n  EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_FLOAT_EQ(next_x(2), 0);\n  EXPECT_FLOAT_EQ(next_x(3), 0);\n  EXPECT_FLOAT_EQ(next_x(4), 0);\n  EXPECT_FLOAT_EQ(next_x(5), 0);\n  EXPECT_FLOAT_EQ(next_x(6), 0);\n\n  x << 1, M_PI_2, 0, 3, 0, 0, 0;\n  u << 1, 0;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 4.9 + 1.3 - 3.7);\n  EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_NEAR(next_x(2), 0, 1e-7);\n  EXPECT_FLOAT_EQ(next_x(3), 1);\n  EXPECT_FLOAT_EQ(next_x(4), 0);\n  EXPECT_FLOAT_EQ(next_x(5), 0);\n  EXPECT_FLOAT_EQ(next_x(6), 0);\n\n  x << 1, 0, 0, 3, 0, 0, 0;\n  u << -1, 0;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 4.9 - 3.7);\n  EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_FLOAT_EQ(next_x(2), 1);\n  EXPECT_FLOAT_EQ(next_x(3), 0);\n  
EXPECT_FLOAT_EQ(next_x(4), 0);\n  EXPECT_FLOAT_EQ(next_x(5), 0.33);\n  EXPECT_FLOAT_EQ(next_x(6), 0);\n\n  x << 1, 0, 0, 3, 0, 0.33, 0;\n  u << -1, 0;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 4.9 - 0.33 * 2.5 - 3.7);\n  EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_FLOAT_EQ(next_x(2), 1);\n  EXPECT_FLOAT_EQ(next_x(3), 0);\n  EXPECT_FLOAT_EQ(next_x(4), 0);\n  EXPECT_FLOAT_EQ(next_x(5), 0.33);\n  EXPECT_FLOAT_EQ(next_x(6), 0);\n\n  x << 1, 0, 0, 3, 0, 1.0, 0;\n  u << 1, 0;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 4.9 - 2.5 + 1.3 - 3.7);\n  EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_FLOAT_EQ(next_x(2), 1);\n  EXPECT_FLOAT_EQ(next_x(3), 0);\n  EXPECT_FLOAT_EQ(next_x(4), 0);\n  EXPECT_FLOAT_EQ(next_x(5), -0.9);\n  EXPECT_FLOAT_EQ(next_x(6), 0);\n\n  x << -1, 0, 0, 3, 0, 0, 0;\n  u << 1, 0;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 4.9 + 3.7 + 1.3);\n  EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_FLOAT_EQ(next_x(2), -1);\n  EXPECT_FLOAT_EQ(next_x(3), 0);\n  EXPECT_FLOAT_EQ(next_x(4), 0);\n  EXPECT_FLOAT_EQ(next_x(5), 0);\n  EXPECT_FLOAT_EQ(next_x(6), 0);\n\n  x << -1, 0, 0, 3, 0, 1.0, 0;\n  u << -1, 0;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 4.9 + 2.5 + 3.7);\n  EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_FLOAT_EQ(next_x(2), -1);\n  EXPECT_FLOAT_EQ(next_x(3), 0);\n  EXPECT_FLOAT_EQ(next_x(4), 0);\n  EXPECT_FLOAT_EQ(next_x(5), 0);\n  EXPECT_FLOAT_EQ(next_x(6), 0);\n\n  x << -3, 0, 0, 3, 0, 1.0, 0;\n  u << -1, 0;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 4.9 + 2.5 + 3.7 * 3);\n  EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_FLOAT_EQ(next_x(2), -3);\n  EXPECT_FLOAT_EQ(next_x(3), 0);\n  EXPECT_FLOAT_EQ(next_x(4), 0);\n  EXPECT_FLOAT_EQ(next_x(5), 0);\n  EXPECT_FLOAT_EQ(next_x(6), 0);\n\n  x << 4, 0, 0, 3, 0, 1.0, 0;\n  u << -1, 0;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 4.9 - 2.5 - 3.7 * 4);\n  
EXPECT_FLOAT_EQ(next_x(1), 0);\n  EXPECT_FLOAT_EQ(next_x(2), 4);\n  EXPECT_FLOAT_EQ(next_x(3), 0);\n  EXPECT_FLOAT_EQ(next_x(4), 0);\n  EXPECT_FLOAT_EQ(next_x(5), 0);\n  EXPECT_FLOAT_EQ(next_x(6), 0);\n\n  x << 1, M_PI, 0, 3, 0, 0, 0;\n  u << 0, 1;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 4.9 - 3.7);\n  EXPECT_FLOAT_EQ(next_x(1), (1 / .3) * tan(0));\n  EXPECT_FLOAT_EQ(next_x(2), -1);\n  EXPECT_NEAR(next_x(3), 0, 1e-7);\n  EXPECT_FLOAT_EQ(next_x(4), 1 * 5 * 0.6);\n  EXPECT_FLOAT_EQ(next_x(5), 0);\n  EXPECT_FLOAT_EQ(next_x(6), 0);\n\n  x << 1, M_PI, 0, 0, 0.5, 0, 0;\n  u << 1, -1;\n  dynamics.computeDynamics(x, u, next_x);\n  EXPECT_FLOAT_EQ(next_x(0), 4.9 - 3.7 + 1.3);\n  EXPECT_FLOAT_EQ(next_x(1), (1 / .3) * tan(0.5 / -9.1));\n  EXPECT_FLOAT_EQ(next_x(2), -1);\n  EXPECT_NEAR(next_x(3), 0, 1e-7);\n  EXPECT_FLOAT_EQ(next_x(4), (-1 * 5 - 0.5) * 0.6);\n  EXPECT_FLOAT_EQ(next_x(5), 0);\n  EXPECT_FLOAT_EQ(next_x(6), 0);\n}\n\nTEST(RacerDubins, enforceLeash)\n{\n  RacerDubins dynamics = RacerDubins();\n  RacerDubins::state_array state_true = RacerDubins::state_array::Zero();\n  RacerDubins::state_array state_nominal = RacerDubins::state_array::Zero();\n  RacerDubins::state_array leash_values = RacerDubins::state_array::Zero();\n  RacerDubins::state_array output;\n  double tol = 1e-7;\n\n  // if the two are equal it should match\n  state_true = RacerDubins::state_array::Random();\n  state_nominal = state_true;\n  dynamics.enforceLeash(state_true, state_nominal, leash_values, output);\n  // std::cout << \"true   : \" << state_true.transpose() << std::endl;\n  // std::cout << \"nominal: \" << state_nominal.transpose() << std::endl;\n  // std::cout << \"output : \" << output.transpose() << std::endl;\n  EXPECT_LT((output - state_true).norm(), tol);\n\n  // if the two are very far apart but zero leash values\n  state_nominal << 1, 1, 1, 1, 1, 1, 1, 1;\n  dynamics.enforceLeash(state_true, state_nominal, leash_values, output);\n  EXPECT_LT((output 
- state_true).norm(), tol);\n\n  // if the two are far apart but large leash values\n  state_nominal << 1, 1, 1, 1, 1, 1, 1, 1;\n  leash_values << 2, 2, 2, 2, 2, 2, 2, 2;\n  dynamics.enforceLeash(state_true, state_nominal, leash_values, output);\n  EXPECT_LT((output - state_nominal).norm(), tol);\n\n  // check yaw discont\n  state_true << -3, -3, -3, -3, -3, -3, -3;\n  state_nominal << 3, 3, 3, 3, 3, 3, 3;\n  leash_values << 0, 1.0, 0, 0, 0, 0, 0;\n  dynamics.enforceLeash(state_true, state_nominal, leash_values, output);\n  EXPECT_FLOAT_EQ(output(0), -3);\n  EXPECT_FLOAT_EQ(output(1), 3);\n  EXPECT_FLOAT_EQ(output(2), -3);\n  EXPECT_FLOAT_EQ(output(3), -3);\n  EXPECT_FLOAT_EQ(output(4), -3);\n  EXPECT_FLOAT_EQ(output(5), -3);\n  EXPECT_FLOAT_EQ(output(6), -3);\n\n  // check yaw discont clamp\n  state_true << -3, -3, -3, -3, -3, -3, -3;\n  state_nominal << 3, 3, 3, 3, 3, 3, 3;\n  leash_values << 0, 0.15, 0, 0, 0, 0, 0;\n  dynamics.enforceLeash(state_true, state_nominal, leash_values, output);\n  EXPECT_FLOAT_EQ(output(0), -3);\n  EXPECT_FLOAT_EQ(output(1), angle_utils::normalizeAngle(-3 - 0.15));\n  EXPECT_FLOAT_EQ(output(2), -3);\n  EXPECT_FLOAT_EQ(output(3), -3);\n  EXPECT_FLOAT_EQ(output(4), -3);\n  EXPECT_FLOAT_EQ(output(5), -3);\n  EXPECT_FLOAT_EQ(output(6), -3);\n\n  // check yaw discont clamp\n  state_true << 3, 3, 3, 3, 3, 3, 3;\n  state_nominal << -3, -3, -3, -3, -3, -3, -3;\n  leash_values << 0, 0.15, 0, 0, 0, 0, 0;\n  dynamics.enforceLeash(state_true, state_nominal, leash_values, output);\n  EXPECT_FLOAT_EQ(output(0), 3);\n  EXPECT_FLOAT_EQ(output(1), angle_utils::normalizeAngle(3 + 0.15));\n  EXPECT_FLOAT_EQ(output(2), 3);\n  EXPECT_FLOAT_EQ(output(3), 3);\n  EXPECT_FLOAT_EQ(output(4), 3);\n  EXPECT_FLOAT_EQ(output(5), 3);\n  EXPECT_FLOAT_EQ(output(6), 3);\n\n  leash_values = RacerDubins::state_array::Ones();\n  std::cout << \"=========\" << std::endl;\n\n  for (int i = 0; i < RacerDubins::STATE_DIM; i++)\n  {\n    state_true = 
RacerDubins::state_array::Zero();\n    state_nominal = RacerDubins::state_array::Zero();\n\n    state_true(i) = 1.0;\n    state_nominal(i) = 1.1;\n\n    dynamics.enforceLeash(state_true, state_nominal, leash_values, output);\n\n    for (int j = 0; j < RacerDubins::STATE_DIM; j++)\n    {\n      if (i == j)\n      {\n        EXPECT_FLOAT_EQ(output(j), 1.1);\n      }\n      else\n      {\n        EXPECT_FLOAT_EQ(output(j), 0.0);\n      }\n    }\n  }\n}\n\nTEST(RacerDubins, TestModelGPU)\n{\n  RacerDubins dynamics = RacerDubins();\n  dynamics.GPUSetup();\n\n  Eigen::Matrix<float, RacerDubins::CONTROL_DIM, 100> control_trajectory;\n  control_trajectory = Eigen::Matrix<float, RacerDubins::CONTROL_DIM, 100>::Random();\n  Eigen::Matrix<float, RacerDubins::STATE_DIM, 100> state_trajectory;\n  state_trajectory = Eigen::Matrix<float, RacerDubins::STATE_DIM, 100>::Random();\n\n  std::vector<std::array<float, 8>> s(100);\n  std::vector<std::array<float, 8>> s_der(100);\n  // steering, throttle\n  std::vector<std::array<float, 2>> u(100);\n  for (int state_index = 0; state_index < s.size(); state_index++)\n  {\n    for (int dim = 0; dim < s[0].size(); dim++)\n    {\n      s[state_index][dim] = state_trajectory.col(state_index)(dim);\n    }\n    for (int dim = 0; dim < u[0].size(); dim++)\n    {\n      u[state_index][dim] = control_trajectory.col(state_index)(dim);\n    }\n  }\n\n  // Run dynamics on GPU\n  for (int y_dim = 1; y_dim <= 4; y_dim++)\n  {\n    launchComputeDynamicsTestKernel<RacerDubins, 8, 2>(dynamics, s, u, s_der, y_dim);\n    for (int point = 0; point < 100; point++)\n    {\n      RacerDubins::state_array state = state_trajectory.col(point);\n      RacerDubins::control_array control = control_trajectory.col(point);\n      RacerDubins::state_array state_der_cpu = RacerDubins::state_array::Zero();\n\n      dynamics.computeDynamics(state, control, state_der_cpu);\n\n      for (int dim = 0; dim < RacerDubins::STATE_DIM; dim++)\n      {\n        
EXPECT_NEAR(state(dim), s[point][dim], 1e-5)\n            << \"at sample \" << point << \", state dim: \" << dim << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(s[point][dim]));\n      }\n      for (int dim = 0; dim < RacerDubins::CONTROL_DIM; dim++)\n      {\n        EXPECT_NEAR(control(dim), u[point][dim], 1e-5)\n            << \"at sample \" << point << \", state dim: \" << dim << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(u[point][dim]));\n      }\n      for (int dim = 0; dim < RacerDubins::STATE_DIM; dim++)\n      {\n        EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-5)\n            << \"at sample \" << point << \", state dim: \" << dim << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(s_der[point][dim]));\n      }\n    }\n  }\n\n  dynamics.freeCudaMem();\n}\n\nTEST(RacerDubins, TestUpdateState)\n{\n  RacerDubins dynamics = RacerDubins();\n  std::array<float2, 2> ranges{};\n  ranges[0].y = FLT_MAX;\n  ranges[0].x = -1;\n  ranges[1].y = FLT_MAX;\n  ranges[1].x = -FLT_MAX;\n  dynamics.setControlRanges(ranges);\n  RacerDubins::state_array state;\n  RacerDubins::state_array state_der;\n\n  state << 0, 0, 0, 0, 0, 0, 0;\n  state_der << 1, 1, 1, 1, 1, 1, 0;\n  dynamics.updateState(state, state_der, 0.1);\n  EXPECT_TRUE(state_der != RacerDubins::state_array::Zero());\n  EXPECT_FLOAT_EQ(state(0), 0.1);\n  EXPECT_NEAR(state(1), 0.1, 1e-7);\n  EXPECT_FLOAT_EQ(state(2), 0.1);\n  EXPECT_FLOAT_EQ(state(3), 0.1);\n  EXPECT_FLOAT_EQ(state(4), 0.1);\n  EXPECT_FLOAT_EQ(state(5), 0.1);\n  EXPECT_FLOAT_EQ(state(6), 1);\n\n  state << 0, M_PI - 0.1, 0, 0, 0, 0, 0;\n  state_der << 1, 1, 1, 1, 1, -1, 1;\n  dynamics.updateState(state, state_der, 1.0);\n  EXPECT_TRUE(state_der != RacerDubins::state_array::Zero());\n  EXPECT_FLOAT_EQ(state(0), 1.0);\n  EXPECT_FLOAT_EQ(state(1), 1.0 - M_PI - 0.1);\n  EXPECT_FLOAT_EQ(state(2), 1.0);\n  EXPECT_FLOAT_EQ(state(3), 1.0);\n  EXPECT_FLOAT_EQ(state(4), 0.5);  // max steer angle is 0.5\n  
EXPECT_FLOAT_EQ(state(5), 0);\n  EXPECT_FLOAT_EQ(state(6), 1);\n\n  state << 0, M_PI - 0.1, 0, 0, 0, 0, 0;\n  state_der << 1, 1, 1, 1, 1, 2, 1;\n  dynamics.updateState(state, state_der, 1.0);\n  EXPECT_TRUE(state_der != RacerDubins::state_array::Zero());\n  EXPECT_FLOAT_EQ(state(0), 1.0);\n  EXPECT_FLOAT_EQ(state(1), 1.0 - M_PI - 0.1);\n  EXPECT_FLOAT_EQ(state(2), 1.0);\n  EXPECT_FLOAT_EQ(state(3), 1.0);\n  EXPECT_FLOAT_EQ(state(4), 0.5);  // max steer angle is 0.5\n  EXPECT_FLOAT_EQ(state(5), 1.0);\n  EXPECT_FLOAT_EQ(state(6), 1);\n\n  state << 0, -M_PI + 0.1, 0, 0, 0, 0, 0;\n  state_der << 1, -1, 1, 1, 1, 1, 1;\n  dynamics.updateState(state, state_der, 1.0);\n  EXPECT_TRUE(state_der != RacerDubins::state_array::Zero());\n  EXPECT_FLOAT_EQ(state(0), 1.0);\n  EXPECT_FLOAT_EQ(state(1), M_PI + 0.1 - 1.0);\n  EXPECT_FLOAT_EQ(state(2), 1.0);\n  EXPECT_FLOAT_EQ(state(3), 1.0);\n  EXPECT_FLOAT_EQ(state(4), 0.5);  // max steer angle is 0.5\n  EXPECT_FLOAT_EQ(state(5), 1);\n  EXPECT_FLOAT_EQ(state(6), 1);\n}\n\nTEST(RacerDubins, TestUpdateStateGPU)\n{\n  RacerDubins dynamics = RacerDubins();\n  dynamics.GPUSetup();\n\n  Eigen::Matrix<float, RacerDubins::CONTROL_DIM, 100> control_trajectory;\n  control_trajectory = Eigen::Matrix<float, RacerDubins::CONTROL_DIM, 100>::Random();\n  Eigen::Matrix<float, RacerDubins::STATE_DIM, 100> state_trajectory;\n  state_trajectory = Eigen::Matrix<float, RacerDubins::STATE_DIM, 100>::Random();\n\n  std::vector<std::array<float, RacerDubins::STATE_DIM>> s(100);\n  std::vector<std::array<float, RacerDubins::STATE_DIM>> s_der(100);\n  // steering, throttle\n  std::vector<std::array<float, RacerDubins::CONTROL_DIM>> u(100);\n\n  RacerDubins::state_array state;\n  RacerDubins::control_array control;\n  RacerDubins::state_array state_der_cpu = RacerDubins::state_array::Zero();\n\n  // Run dynamics on dynamicsU\n  // Run dynamics on GPU\n  for (int y_dim = 1; y_dim <= 4; y_dim++)\n  {\n    for (int state_index = 0; state_index < s.size(); 
state_index++)\n    {\n      for (int dim = 0; dim < s[0].size(); dim++)\n      {\n        s[state_index][dim] = state_trajectory.col(state_index)(dim);\n      }\n      for (int dim = 0; dim < u[0].size(); dim++)\n      {\n        u[state_index][dim] = control_trajectory.col(state_index)(dim);\n      }\n    }\n\n    launchComputeStateDerivTestKernel<RacerDubins, RacerDubins::STATE_DIM, RacerDubins::CONTROL_DIM>(dynamics, s, u,\n                                                                                                     s_der, y_dim);\n    launchUpdateStateTestKernel<RacerDubins, RacerDubins::STATE_DIM>(dynamics, s, s_der, 0.1f, y_dim);\n    for (int point = 0; point < 100; point++)\n    {\n      RacerDubins::state_array state = state_trajectory.col(point);\n      RacerDubins::control_array control = control_trajectory.col(point);\n      RacerDubins::state_array state_der_cpu = RacerDubins::state_array::Zero();\n\n      dynamics.computeDynamics(state, control, state_der_cpu);\n      dynamics.updateState(state, state_der_cpu, 0.1f);\n      for (int dim = 0; dim < RacerDubins::STATE_DIM; dim++)\n      {\n        EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-5) << \"at index \" << point << \" with y_dim \" << y_dim;\n        EXPECT_NEAR(state(dim), s[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(s[point][dim]));\n      }\n    }\n  }\n  dynamics.freeCudaMem();\n}\n\nTEST(RacerDubins, ComputeStateTrajectoryFiniteTest)\n{\n  RacerDubins dynamics = RacerDubins();\n  RacerDubinsParams params;\n  params.c_t[0] = 3.0;\n  params.c_b[0] = 0.2;\n  params.c_v[0] = 0.2;\n  params.c_0 = 0.2;\n  params.wheel_base = 3.0;\n  params.steering_constant = 1.0;\n  dynamics.setParams(params);\n\n  Eigen::Matrix<float, RacerDubins::CONTROL_DIM, 500> control_trajectory;\n  control_trajectory = Eigen::Matrix<float, RacerDubins::CONTROL_DIM, 500>::Zero();\n  Eigen::Matrix<float, RacerDubins::STATE_DIM, 500> 
state_trajectory;\n  state_trajectory = Eigen::Matrix<float, RacerDubins::STATE_DIM, 500>::Zero();\n  RacerDubins::state_array state_der;\n  RacerDubins::state_array x;\n  x << 0, 1.46919e-6, 0.0140179, 1.09739e-8, -0.000735827;\n\n  for (int i = 0; i < 500; i++)\n  {\n    RacerDubins::control_array u = control_trajectory.col(i);\n    dynamics.computeDynamics(x, u, state_der);\n    EXPECT_TRUE(x.allFinite());\n    EXPECT_TRUE(u.allFinite());\n    EXPECT_TRUE(state_der.allFinite());\n    dynamics.updateState(x, state_der, 0.02);\n    EXPECT_TRUE(x.allFinite());\n    EXPECT_TRUE(u.allFinite());\n    EXPECT_TRUE(state_der != RacerDubins::state_array::Zero());\n  }\n  params.steering_constant = 0.5;\n  dynamics.setParams(params);\n\n  x << 0, 1.46919e-6, 0.0140179, 1.09739e-8, -1.0;\n  for (int i = 0; i < 500; i++)\n  {\n    RacerDubins::control_array u = control_trajectory.col(i);\n    dynamics.computeDynamics(x, u, state_der);\n    EXPECT_TRUE(x.allFinite());\n    EXPECT_TRUE(u.allFinite());\n    EXPECT_TRUE(state_der.allFinite());\n    dynamics.updateState(x, state_der, 0.02);\n    EXPECT_TRUE(x.allFinite());\n    EXPECT_TRUE(u.allFinite());\n    EXPECT_TRUE(state_der != RacerDubins::state_array::Zero());\n  }\n}\n\n/*\nclass LinearDummy : public RacerDubins {\npublic:\n  bool computeGrad(const Eigen::Ref<const state_array> & state,\n                   const Eigen::Ref<const control_array>& control,\n                   Eigen::Ref<dfdx> A,\n                   Eigen::Ref<dfdu> B) {\n    return false;\n  };\n};\n\nTEST(RacerDubins, TestComputeGradComputation) {\n  Eigen::Matrix<float, RacerDubins::STATE_DIM, RacerDubins::STATE_DIM + RacerDubins::CONTROL_DIM> numeric_jac;\n  Eigen::Matrix<float, RacerDubins::STATE_DIM, RacerDubins::STATE_DIM + RacerDubins::CONTROL_DIM> analytic_jac;\n  RacerDubins::state_array state;\n  state << 1, 2, 3, 4;\n  RacerDubins::control_array control;\n  control << 5;\n\n  auto analytic_grad_model = RacerDubins();\n\n  RacerDubins::dfdx 
A_analytic = RacerDubins::dfdx::Zero();\n  RacerDubins::dfdu B_analytic = RacerDubins::dfdu::Zero();\n\n  analytic_grad_model.computeGrad(state, control, A_analytic, B_analytic);\n\n  auto numerical_grad_model = LinearDummy();\n\n  std::shared_ptr<ModelWrapperDDP<LinearDummy>> ddp_model =\nstd::make_shared<ModelWrapperDDP<LinearDummy>>(&numerical_grad_model);\n\n  analytic_jac.leftCols<RacerDubins::STATE_DIM>() = A_analytic;\n  analytic_jac.rightCols<RacerDubins::CONTROL_DIM>() = B_analytic;\n  numeric_jac = ddp_model->df(state, control);\n\n  ASSERT_LT((numeric_jac - analytic_jac).norm(), 1e-3) << \"Numeric Jacobian\\n\" << numeric_jac << \"\\nAnalytic Jacobian\\n\"\n<< analytic_jac;\n}\n\n*/\n"
  },
  {
    "path": "tests/dynamics/racer_suspension_model_test.cu",
    "content": "#include <Eigen/Dense>\n#include <gtest/gtest.h>\n#include <mppi/dynamics/racer_suspension/racer_suspension.cuh>\n#include <kernel_tests/dynamics/dynamics_generic_kernel_tests.cuh>\n#include <mppi/core/mppi_common.cuh>\n#include <mppi/ddp/ddp_model_wrapper.h>\n#include <cuda_runtime.h>\n\ntemplate <class DYN_T, int NUM_ROLLOUTS, int BLOCKSIZE_X, int BLOCKSIZE_Y, int BLOCKSIZE_Z = 1>\n__global__ void runGPUDynamics(DYN_T* dynamics, const int num_timesteps, float dt, const float* __restrict__ x_init_d,\n                               const float* __restrict__ u_d, float* __restrict__ x_next_d,\n                               float* __restrict__ output_d)\n{\n  extern __shared__ float entire_buffer[];\n  float* x_shared = entire_buffer;\n  float* x_dot_shared = &x_shared[mppi::math::nearest_multiple_4(BLOCKSIZE_X * BLOCKSIZE_Z * 2 * DYN_T::STATE_DIM)];\n  float* u_shared = &x_dot_shared[mppi::math::nearest_multiple_4(BLOCKSIZE_X * BLOCKSIZE_Z * DYN_T::STATE_DIM)];\n  float* y_shared = &u_shared[mppi::math::nearest_multiple_4(BLOCKSIZE_X * BLOCKSIZE_Z * DYN_T::CONTROL_DIM)];\n  float* theta_s = &y_shared[mppi::math::nearest_multiple_4(BLOCKSIZE_X * BLOCKSIZE_Z * 2 * DYN_T::OUTPUT_DIM)];\n\n  int global_idx = blockIdx.x * blockDim.x + threadIdx.x;\n  int j = 0;\n\n  float* x;\n  float* x_next;\n  float* x_temp;\n  float* x_dot;\n  float* u;\n  float* y;\n\n  x = &x_shared[(BLOCKSIZE_X * threadIdx.z + threadIdx.x + 0) * DYN_T::STATE_DIM];\n  x_next = &x_shared[(BLOCKSIZE_X * threadIdx.z + threadIdx.x + 1) * DYN_T::STATE_DIM];\n  x_dot = &x_dot_shared[(BLOCKSIZE_X * threadIdx.z + threadIdx.x) * DYN_T::STATE_DIM];\n  u = &u_shared[(BLOCKSIZE_X * threadIdx.z + threadIdx.x) * DYN_T::CONTROL_DIM];\n  y = &y_shared[(BLOCKSIZE_X * threadIdx.z + threadIdx.x) * DYN_T::OUTPUT_DIM];\n  for (j = threadIdx.y; j < DYN_T::STATE_DIM; j += blockDim.y)\n  {\n    x[j] = x_init_d[j];\n    x_dot[j] = 0;\n  }\n  for (j = threadIdx.y; j < DYN_T::OUTPUT_DIM; j += blockDim.y)\n  
{\n    y[j] = 0;\n  }\n  __syncthreads();\n  dynamics->initializeDynamics(x, u, y, theta_s, 0, dt);\n  __syncthreads();\n  for (int t = 0; t < num_timesteps; t++)\n  {\n    for (j = threadIdx.y; j < DYN_T::CONTROL_DIM; j += blockDim.y)\n    {\n      u[j] = u_d[global_idx * num_timesteps * DYN_T::CONTROL_DIM + t * DYN_T::CONTROL_DIM + j];\n    }\n    for (j = threadIdx.y; j < DYN_T::STATE_DIM; j += blockDim.y)\n    {\n      x_next_d[global_idx * num_timesteps * DYN_T::STATE_DIM + t * DYN_T::STATE_DIM + j] = x[j];\n    }\n    for (j = threadIdx.y; j < DYN_T::OUTPUT_DIM; j += blockDim.y)\n    {\n      output_d[global_idx * num_timesteps * DYN_T::OUTPUT_DIM + t * DYN_T::OUTPUT_DIM + j] = y[j];\n    }\n    __syncthreads();\n    dynamics->enforceConstraints(x, u);\n    __syncthreads();\n    dynamics->step(x, x_next, x_dot, u, y, theta_s, t, dt);\n    __syncthreads();\n    x_temp = x;\n    x = x_next;\n    x_next = x_temp;\n  }\n}\n\nclass RacerSuspensionTest : public ::testing::Test\n{\npublic:\n  cudaStream_t stream;\n\n  void SetUp() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n  }\n\n  void TearDown() override\n  {\n    CudaCheckError();\n    HANDLE_ERROR(cudaStreamDestroy(stream));\n  }\n};\n\nTEST_F(RacerSuspensionTest, Template)\n{\n  auto dynamics = RacerSuspension(stream);\n  EXPECT_EQ(14, RacerSuspension::STATE_DIM);\n  EXPECT_EQ(2, RacerSuspension::CONTROL_DIM);\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n}\n\nTEST_F(RacerSuspensionTest, BindStream)\n{\n  auto dynamics = RacerSuspension(stream);\n\n  EXPECT_EQ(dynamics.stream_, stream) << \"Stream binding failure.\";\n  EXPECT_NE(dynamics.getTextureHelper(), nullptr);\n  EXPECT_EQ(dynamics.getTextureHelper()->stream_, stream);\n}\n\nTEST_F(RacerSuspensionTest, OmegaJacobian)\n{\n  using DYN_PARAMS = RacerSuspensionParams;\n  auto dynamics = RacerSuspension(stream);\n  RacerSuspension::state_array x = RacerSuspension::state_array::Zero();\n  
x[S_IND_CLASS(DYN_PARAMS, ATTITUDE_QW)] = 1;\n  x[S_IND_CLASS(DYN_PARAMS, OMEGA_B_X)] = 0.1;\n  x[S_IND_CLASS(DYN_PARAMS, OMEGA_B_Y)] = -0.03;\n  x[S_IND_CLASS(DYN_PARAMS, OMEGA_B_Z)] = 0.02;\n  x[S_IND_CLASS(DYN_PARAMS, V_I_X)] = 2;\n\n  RacerSuspension::state_array x_dot0 = RacerSuspension::state_array::Zero();\n  RacerSuspension::state_array x_dot1 = RacerSuspension::state_array::Zero();\n  RacerSuspension::control_array u = RacerSuspension::control_array::Zero();\n  RacerSuspension::output_array output = RacerSuspension::output_array::Zero();\n  Eigen::Matrix3f omega_jac;\n  float delta = 0.001;\n  dynamics.computeStateDeriv(x, u, x_dot0, output, &omega_jac);\n  for (int i = 0; i < 3; i++)\n  {\n    x[S_IND_CLASS(DYN_PARAMS, OMEGA_B_X) + i] += delta;\n    dynamics.computeStateDeriv(x, u, x_dot1, output);\n    x[S_IND_CLASS(DYN_PARAMS, OMEGA_B_X) + i] -= delta;\n\n    float abs_tol = 2;\n    EXPECT_NEAR(omega_jac.col(i)[0],\n                (x_dot1[S_IND_CLASS(DYN_PARAMS, OMEGA_B_X)] - x_dot0[S_IND_CLASS(DYN_PARAMS, OMEGA_B_X)]) / delta,\n                abs_tol);\n    EXPECT_NEAR(omega_jac.col(i)[1],\n                (x_dot1[S_IND_CLASS(DYN_PARAMS, OMEGA_B_Y)] - x_dot0[S_IND_CLASS(DYN_PARAMS, OMEGA_B_Y)]) / delta,\n                abs_tol);\n    EXPECT_NEAR(omega_jac.col(i)[2],\n                (x_dot1[S_IND_CLASS(DYN_PARAMS, OMEGA_B_Z)] - x_dot0[S_IND_CLASS(DYN_PARAMS, OMEGA_B_Z)]) / delta,\n                abs_tol);\n  }\n}\n\nTEST_F(RacerSuspensionTest, CPUvsGPU)\n{\n  GTEST_SKIP();\n  const int NUM_PARALLEL_TESTS = 10;  // MUST BE EVEN\n  const int NUM_TIMESTEPS = 8;\n  const float dt = 0.02;\n  typedef RacerSuspension DYN;\n  typedef Eigen::Matrix<float, DYN::STATE_DIM, NUM_TIMESTEPS> state_traj;\n  typedef Eigen::Matrix<float, DYN::CONTROL_DIM, NUM_TIMESTEPS> control_traj;\n  typedef Eigen::Matrix<float, DYN::OUTPUT_DIM, NUM_TIMESTEPS> output_traj;\n  typedef util::EigenAlignedVector<float, DYN::CONTROL_DIM, NUM_TIMESTEPS> control_trajectories;\n  typedef 
util::EigenAlignedVector<float, DYN::STATE_DIM, NUM_TIMESTEPS> state_trajectories;\n  typedef util::EigenAlignedVector<float, DYN::OUTPUT_DIM, NUM_TIMESTEPS> output_trajectories;\n  using state_array = typename DYN::state_array;\n  using control_array = typename DYN::control_array;\n  using output_array = typename DYN::output_array;\n  using DYN_PARAMS = RacerSuspensionParams;\n\n  auto dynamics = RacerSuspension(stream);\n  auto control_range = dynamics.getControlRanges();\n  control_range[0] = make_float2(-1, 1);\n  control_range[1] = make_float2(-1, 1);\n  dynamics.setControlRanges(control_range);\n  dynamics.GPUSetup();\n\n  std::default_random_engine generator(15.0);\n  std::normal_distribution<float> throttle_distribution(0.3, 0.3);\n  std::normal_distribution<float> steering_distribution(0.0, 0.8);\n\n  float* x_init_d;\n  float* u_d;\n  float* x_next_d;\n  float* output_d;\n\n  HANDLE_ERROR(cudaMalloc((void**)&x_init_d, sizeof(float) * DYN::STATE_DIM * NUM_PARALLEL_TESTS));\n  HANDLE_ERROR(cudaMalloc((void**)&u_d, sizeof(float) * DYN::CONTROL_DIM * NUM_PARALLEL_TESTS * NUM_TIMESTEPS));\n  HANDLE_ERROR(cudaMalloc((void**)&x_next_d, sizeof(float) * DYN::STATE_DIM * NUM_PARALLEL_TESTS * NUM_TIMESTEPS));\n  HANDLE_ERROR(cudaMalloc((void**)&output_d, sizeof(float) * DYN::OUTPUT_DIM * NUM_PARALLEL_TESTS * NUM_TIMESTEPS));\n\n  // Input variables\n  DYN::state_array x_init = DYN::state_array::Zero();\n  x_init[S_IND_CLASS(DYN_PARAMS, P_I_X)] = 10;\n  x_init[S_IND_CLASS(DYN_PARAMS, P_I_Y)] = 20;\n  x_init[S_IND_CLASS(DYN_PARAMS, P_I_Z)] = 30;\n  x_init[S_IND_CLASS(DYN_PARAMS, ATTITUDE_QW)] = 1;\n  HANDLE_ERROR(\n      cudaMemcpyAsync(x_init_d, x_init.data(), sizeof(float) * DYN::STATE_DIM, cudaMemcpyHostToDevice, stream));\n\n  control_trajectories u_noise;\n  for (int s = 0; s < NUM_PARALLEL_TESTS; s++)\n  {\n    control_traj sample_u = control_traj::Zero();\n    for (int t = 0; t < NUM_TIMESTEPS; t++)\n    {\n      sample_u(0, t) = 
throttle_distribution(generator);\n      sample_u(1, t) = steering_distribution(generator);\n    }\n    u_noise.push_back(sample_u);\n    HANDLE_ERROR(cudaMemcpyAsync(u_d + s * NUM_TIMESTEPS * DYN::CONTROL_DIM, u_noise[s].data(),\n                                 sizeof(float) * NUM_TIMESTEPS * DYN::CONTROL_DIM, cudaMemcpyHostToDevice, stream));\n  }\n\n  // Output variables\n  state_trajectories x_next_CPU(NUM_PARALLEL_TESTS);\n  state_trajectories x_next_GPU(NUM_PARALLEL_TESTS);\n  output_trajectories output_CPU(NUM_PARALLEL_TESTS);\n  output_trajectories output_GPU(NUM_PARALLEL_TESTS);\n\n  // CPU Test\n  for (int s = 0; s < NUM_PARALLEL_TESTS; s++)\n  {\n    state_traj sample_state_traj;\n    output_traj sample_output_traj;\n    state_array xdot;\n    state_array x = x_init;\n    state_array x_next = x_init;\n    output_array output = output_array::Zero();\n    control_array u;\n    for (int t = 0; t < NUM_TIMESTEPS; t++)\n    {\n      if (t == 0)\n      {\n        dynamics.initializeDynamics(x, u, output, t, dt);\n      }\n      u = u_noise[s].col(t);\n      sample_state_traj.col(t) = x;\n      sample_output_traj.col(t) = output;\n\n      dynamics.enforceConstraints(x, u);\n      dynamics.step(x, x_next, xdot, u, output, t, dt);\n      x = x_next;\n    }\n    x_next_CPU[s] = sample_state_traj;\n    output_CPU[s] = sample_output_traj;\n  }\n\n  // GPU Test\n  const int BLOCKSIZE_X = 2;\n  const int BLOCKSIZE_Y = 8;\n  const int NUM_GRIDS_X = (NUM_PARALLEL_TESTS - 1) / BLOCKSIZE_X + 1;\n  dim3 block_dim(BLOCKSIZE_X, BLOCKSIZE_Y, 1);\n  dim3 grid_dim(NUM_GRIDS_X, 1, 1);\n  // Ensure that there won't memory overwriting due to inproper indexing\n  static_assert(NUM_PARALLEL_TESTS % BLOCKSIZE_X == 0);\n\n  int num_shared = block_dim.x * block_dim.z;\n  unsigned shared_mem = mppi::kernels::calcClassSharedMemSize(&dynamics, block_dim) +\n                        sizeof(float) * (3 * mppi::math::nearest_multiple_4(num_shared * DYN::STATE_DIM) +\n                         
                mppi::math::nearest_multiple_4(num_shared * DYN::OUTPUT_DIM) +\n                                         mppi::math::nearest_multiple_4(num_shared * DYN::CONTROL_DIM));\n  runGPUDynamics<DYN, NUM_PARALLEL_TESTS, BLOCKSIZE_X, BLOCKSIZE_Y><<<grid_dim, block_dim, shared_mem, stream>>>(\n      dynamics.model_d_, NUM_TIMESTEPS, dt, x_init_d, u_d, x_next_d, output_d);\n  for (int s = 0; s < NUM_PARALLEL_TESTS; s++)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(x_next_GPU[s].data(), x_next_d + s * NUM_TIMESTEPS * DYN::STATE_DIM,\n                                 sizeof(float) * NUM_TIMESTEPS * DYN::STATE_DIM, cudaMemcpyDeviceToHost, stream));\n    HANDLE_ERROR(cudaMemcpyAsync(output_GPU[s].data(), output_d + s * NUM_TIMESTEPS * DYN::OUTPUT_DIM,\n                                 sizeof(float) * NUM_TIMESTEPS * DYN::OUTPUT_DIM, cudaMemcpyDeviceToHost, stream));\n  }\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n  for (int s = 0; s < NUM_PARALLEL_TESTS; s++)\n  {\n    for (int t = 0; t < NUM_TIMESTEPS; t++)\n    {\n      for (int d = 0; d < DYN::STATE_DIM; d++)\n      {\n        ASSERT_NEAR(x_next_CPU[s](d, t), x_next_GPU[s](d, t), 1.0)\n            << \"Sample \" << s << \", t = \" << t << \", state_dim: \" << d << std::endl;\n      }\n      for (int d = 0; d < DYN::OUTPUT_DIM; d++)\n      {\n        ASSERT_TRUE(isfinite(output_CPU[s](d, t)))\n            << \"NaNs/inf at sample \" << s << \" t: \" << t << \", dim: \" << d << std::endl;\n        ASSERT_NEAR(output_CPU[s](d, t), output_GPU[s](d, t), 1.0)\n            << \"Sample \" << s << \", t = \" << t << \", output_dim: \" << d << std::endl;\n      }\n    }\n  }\n}\n\n/*\nfloat c_t = 1.3;\nfloat c_b = 2.5;\nfloat c_v = 3.7;\nfloat c_0 = 4.9;\nfloat wheel_base = 0.3;\n */\n\n// TEST_F(RacerSuspensionTest, ComputeDynamics)\n// {\n//   RacerSuspension dynamics = RacerSuspension();\n//   RacerDubinsParams params = dynamics.getParams();\n//   RacerSuspension::state_array x = 
RacerSuspension::state_array::Zero();\n//   RacerSuspension::control_array u = RacerSuspension::control_array::Zero();\n//\n//   // computeDynamics should not touch the roll/pitch element\n//   RacerSuspension::state_array next_x = RacerSuspension::state_array::Ones() * 0.153;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), 0);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n//\n//   x << 1, M_PI_2, 0, 3, 0, 0.5, -0.5;\n//   u << 1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 1.3 - 3.7);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_NEAR(next_x(2), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(3), 1);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n//\n//   x << 1, 0, 0, 3, 0, 0.5, -0.5;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 2.5 - 3.7);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), 1);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n//\n//   x << -1, 0, 0, 3, 0, 0.5, -0.5;\n//   u << 1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 3.7 + 1.3);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), -1);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n//\n//   x << -1, 0, 0, 3, 0, 0.5, -0.5;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 2.5 + 3.7);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), -1);\n//   
EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n//\n//   x << -3, 0, 0, 3, 0, 0.5, -0.5;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 + 2.5 * 3 + 3.7 * 3);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), -3);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n//\n//   x << 4, 0, 0, 3, 0, 0.5, -0.5;\n//   u << -1, 0;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 2.5 * 4 - 3.7 * 4);\n//   EXPECT_FLOAT_EQ(next_x(1), 0);\n//   EXPECT_FLOAT_EQ(next_x(2), 4);\n//   EXPECT_FLOAT_EQ(next_x(3), 0);\n//   EXPECT_FLOAT_EQ(next_x(4), 0);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n//\n//   x << 1, M_PI, 0, 3, 0, 0.5, -0.5;\n//   u << 0, 1;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 3.7);\n//   EXPECT_FLOAT_EQ(next_x(1), (1 / .3) * tan(0));\n//   EXPECT_FLOAT_EQ(next_x(2), -1);\n//   EXPECT_NEAR(next_x(3), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(4), -1 / 2.45);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n//\n//   x << 1, M_PI, 0, 0, 0.5, 0.5, -0.5;\n//   u << 1, -1;\n//   dynamics.computeDynamics(x, u, next_x);\n//   EXPECT_FLOAT_EQ(next_x(0), 4.9 - 3.7 + 1.3);\n//   EXPECT_FLOAT_EQ(next_x(1), (1 / .3) * tan(0.5));\n//   EXPECT_FLOAT_EQ(next_x(2), -1);\n//   EXPECT_NEAR(next_x(3), 0, 1e-7);\n//   EXPECT_FLOAT_EQ(next_x(4), 1 / 2.45);\n//   EXPECT_FLOAT_EQ(next_x(5), 0.153);\n//   EXPECT_FLOAT_EQ(next_x(6), 0.153);\n// }\n//\n// TEST_F(RacerSuspensionTest, TestModelGPU)\n// {\n//   RacerSuspension dynamics = RacerSuspension();\n//   dynamics.GPUSetup();\n//   CudaCheckError();\n//\n//   Eigen::Matrix<float, RacerSuspension::CONTROL_DIM, 100> 
control_trajectory;\n//   control_trajectory = Eigen::Matrix<float, RacerSuspension::CONTROL_DIM, 100>::Random();\n//   Eigen::Matrix<float, RacerSuspension::STATE_DIM, 100> state_trajectory;\n//   state_trajectory = Eigen::Matrix<float, RacerSuspension::STATE_DIM, 100>::Random();\n//\n//   std::vector<std::array<float, 7>> s(100);\n//   std::vector<std::array<float, 7>> s_der(100);\n//   // steering, throttle\n//   std::vector<std::array<float, 2>> u(100);\n//   for (int state_index = 0; state_index < s.size(); state_index++)\n//   {\n//     for (int dim = 0; dim < s[0].size(); dim++)\n//     {\n//       s[state_index][dim] = state_trajectory.col(state_index)(dim);\n//     }\n//     for (int dim = 0; dim < u[0].size(); dim++)\n//     {\n//       u[state_index][dim] = control_trajectory.col(state_index)(dim);\n//     }\n//   }\n//\n//   // These variables will be changed so initialized to the right size only\n//\n//   // Run dynamics on dynamicsU\n//   // Run dynamics on GPU\n//   for (int y_dim = 1; y_dim <= 4; y_dim++)\n//   {\n//     launchComputeDynamicsTestKernel<RacerSuspension, 7, 2>(dynamics, s, u, s_der, y_dim);\n//     for (int point = 0; point < 100; point++)\n//     {\n//       RacerSuspension::state_array state = state_trajectory.col(point);\n//       RacerSuspension::control_array control = control_trajectory.col(point);\n//       RacerSuspension::state_array state_der_cpu = RacerSuspension::state_array::Zero();\n//\n//       dynamics.computeDynamics(state, control, state_der_cpu);\n//       for (int dim = 0; dim < RacerSuspension::STATE_DIM; dim++)\n//       {\n//         EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], 1e-5) << \"at index \" << point << \" with y_dim \" << y_dim;\n//         EXPECT_TRUE(isfinite(s_der[point][dim]));\n//       }\n//     }\n//   }\n//\n//   dynamics.freeCudaMem();\n//   CudaCheckError();\n// }\n//\n// TEST_F(RacerSuspensionTest, TestUpdateState)\n// {\n//   CudaCheckError();\n//   RacerSuspension dynamics = 
RacerSuspension();\n//   RacerSuspension::state_array state;\n//   RacerSuspension::state_array state_der;\n//\n//   // TODO add in the elevation map\n//\n//   state << 0, 0, 0, 0, 0, -0.5, 0.5;\n//   state_der << 1, 1, 1, 1, 1, 0, 0;\n//   dynamics.updateState(state, state_der, 0.1);\n//   EXPECT_TRUE(state_der == RacerSuspension::state_array::Zero());\n//   EXPECT_FLOAT_EQ(state(0), 0.1);\n//   EXPECT_FLOAT_EQ(state(1), 0.1);\n//   EXPECT_FLOAT_EQ(state(2), 0.1);\n//   EXPECT_FLOAT_EQ(state(3), 0.1);\n//   EXPECT_FLOAT_EQ(state(4), 1.0 + (0 - 1.0) * expf(-0.6 * 0.1));\n//   EXPECT_FLOAT_EQ(state(5), -0.5);\n//   EXPECT_FLOAT_EQ(state(6), 0.5);\n//\n//   state << 0, M_PI - 0.1, 0, 0, 0, -0.5, 0.5;\n//   state_der << 1, 1, 1, 1, 1;\n//   dynamics.updateState(state, state_der, 1.0);\n//   EXPECT_TRUE(state_der == RacerSuspension::state_array::Zero());\n//   EXPECT_FLOAT_EQ(state(0), 1.0);\n//   EXPECT_FLOAT_EQ(state(1), 1.0 - M_PI - 0.1);\n//   EXPECT_FLOAT_EQ(state(2), 1.0);\n//   EXPECT_FLOAT_EQ(state(3), 1.0);\n//   EXPECT_FLOAT_EQ(state(4), 1.0 + (0 - 1.0) * expf(-0.6 * 1.0));\n//   EXPECT_FLOAT_EQ(state(5), -0.5);\n//   EXPECT_FLOAT_EQ(state(6), 0.5);\n//\n//   state << 0, -M_PI + 0.1, 0, 0, 0, -0.5, 0.5;\n//   state_der << 1, -1, 1, 1, 1;\n//   dynamics.updateState(state, state_der, 1.0);\n//   EXPECT_TRUE(state_der == RacerSuspension::state_array::Zero());\n//   EXPECT_FLOAT_EQ(state(0), 1.0);\n//   EXPECT_FLOAT_EQ(state(1), M_PI + 0.1 - 1.0);\n//   EXPECT_FLOAT_EQ(state(2), 1.0);\n//   EXPECT_FLOAT_EQ(state(3), 1.0);\n//   EXPECT_FLOAT_EQ(state(4), 1.0 + (0 - 1.0) * expf(-0.6 * 1.0));\n//   EXPECT_FLOAT_EQ(state(5), -0.5);\n//   EXPECT_FLOAT_EQ(state(6), 0.5);\n//\n//   CudaCheckError();\n// }\n//\n// TEST_F(RacerSuspensionTest, TestUpdateStateGPU)\n// {\n//   CudaCheckError();\n//   RacerSuspension dynamics = RacerSuspension();\n//   CudaCheckError();\n//   dynamics.GPUSetup();\n//   CudaCheckError();\n//\n//   Eigen::Matrix<float, 
RacerSuspension::CONTROL_DIM, 100> control_trajectory;\n//   control_trajectory = Eigen::Matrix<float, RacerSuspension::CONTROL_DIM, 100>::Random();\n//   Eigen::Matrix<float, RacerSuspension::STATE_DIM, 100> state_trajectory;\n//   state_trajectory = Eigen::Matrix<float, RacerSuspension::STATE_DIM, 100>::Random();\n//\n//   std::vector<std::array<float, 7>> s(100);\n//   std::vector<std::array<float, 7>> s_der(100);\n//   // steering, throttle\n//   std::vector<std::array<float, 2>> u(100);\n//\n//   RacerSuspension::state_array state;\n//   RacerSuspension::control_array control;\n//   RacerSuspension::state_array state_der_cpu = RacerSuspension::state_array::Zero();\n//\n//   // Run dynamics on dynamicsU\n//   // Run dynamics on GPU\n//   for (int y_dim = 1; y_dim <= 10; y_dim++)\n//   {\n//     for (int state_index = 0; state_index < s.size(); state_index++)\n//     {\n//       for (int dim = 0; dim < s[0].size(); dim++)\n//       {\n//         s[state_index][dim] = state_trajectory.col(state_index)(dim);\n//       }\n//       for (int dim = 0; dim < u[0].size(); dim++)\n//       {\n//         u[state_index][dim] = control_trajectory.col(state_index)(dim);\n//       }\n//     }\n//\n//     launchComputeStateDerivTestKernel<RacerSuspension, RacerSuspension::STATE_DIM,\n//                                       RacerSuspension::CONTROL_DIM>(dynamics, s, u, s_der, y_dim);\n//     launchUpdateStateTestKernel<RacerSuspension, RacerSuspension::STATE_DIM>(dynamics, s, s_der, 0.1f, y_dim);\n//     for (int point = 0; point < 100; point++)\n//     {\n//       RacerSuspension::state_array state = state_trajectory.col(point);\n//       RacerSuspension::control_array control = control_trajectory.col(point);\n//       RacerSuspension::state_array state_der_cpu = RacerSuspension::state_array::Zero();\n//\n//       dynamics.computeDynamics(state, control, state_der_cpu);\n//       dynamics.updateState(state, state_der_cpu, 0.1f);\n//       for (int dim = 0; dim < 
RacerSuspension::STATE_DIM; dim++)\n//       {\n//         if (dim < 5)\n//         {\n//           EXPECT_FLOAT_EQ(state_der_cpu(dim), s_der[point][dim]) << \"at index \" << point << \" with y_dim \" << y_dim;\n//           EXPECT_NEAR(state(dim), s[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n//           EXPECT_TRUE(isfinite(s[point][dim]));\n//         }\n//         else\n//         {\n//           EXPECT_FLOAT_EQ(state_der_cpu(dim), s_der[point][dim]) << \"at index \" << point << \" with y_dim \" << y_dim;\n//           EXPECT_NEAR(s[point][dim], 0.0, 1e-4)\n//               << \"at index \" << point << \" with y_dim \" << y_dim << \" state index \" << dim;\n//           EXPECT_TRUE(isfinite(s[point][dim]));\n//         }\n//       }\n//     }\n//   }\n//   dynamics.freeCudaMem();\n// }\n//\n// TEST_F(RacerSuspensionTest, ComputeStateTrajectoryFiniteTest)\n// {\n//   RacerSuspension dynamics = RacerSuspension();\n//   RacerDubinsParams params;\n//   params.c_t = 3.0;\n//   params.c_b = 0.2;\n//   params.c_v = 0.2;\n//   params.c_0 = 0.2;\n//   params.wheel_base = 3.0;\n//   params.steering_constant = 1.0;\n//   dynamics.setParams(params);\n//\n//   Eigen::Matrix<float, RacerSuspension::CONTROL_DIM, 500> control_trajectory;\n//   control_trajectory = Eigen::Matrix<float, RacerSuspension::CONTROL_DIM, 500>::Zero();\n//   Eigen::Matrix<float, RacerSuspension::STATE_DIM, 500> state_trajectory;\n//   state_trajectory = Eigen::Matrix<float, RacerSuspension::STATE_DIM, 500>::Zero();\n//   RacerSuspension::state_array state_der;\n//   RacerSuspension::state_array x;\n//   x << 0, 1.46919e-6, 0.0140179, 1.09739e-8, -0.000735827;\n//\n//   for (int i = 0; i < 500; i++)\n//   {\n//     RacerSuspension::control_array u = control_trajectory.col(i);\n//     dynamics.computeDynamics(x, u, state_der);\n//     EXPECT_TRUE(x.allFinite());\n//     EXPECT_TRUE(u.allFinite());\n//     EXPECT_TRUE(state_der.allFinite());\n//     
dynamics.updateState(x, state_der, 0.02);\n//     EXPECT_TRUE(x.allFinite());\n//     EXPECT_TRUE(u.allFinite());\n//     EXPECT_TRUE(state_der == RacerSuspension::state_array::Zero());\n//   }\n//   params.steering_constant = 0.5;\n//   dynamics.setParams(params);\n//\n//   x << 0, 1.46919e-6, 0.0140179, 1.09739e-8, -1.0;\n//   for (int i = 0; i < 500; i++)\n//   {\n//     RacerSuspension::control_array u = control_trajectory.col(i);\n//     dynamics.computeDynamics(x, u, state_der);\n//     EXPECT_TRUE(x.allFinite());\n//     EXPECT_TRUE(u.allFinite());\n//     EXPECT_TRUE(state_der.allFinite());\n//     dynamics.updateState(x, state_der, 0.02);\n//     EXPECT_TRUE(x.allFinite());\n//     EXPECT_TRUE(u.allFinite());\n//     EXPECT_TRUE(state_der == RacerSuspension::state_array::Zero());\n//   }\n// }\n//\n// /*\n// class LinearDummy : public RacerSuspension {\n// public:\n//   bool computeGrad(const Eigen::Ref<const state_array> & state,\n//                    const Eigen::Ref<const control_array>& control,\n//                    Eigen::Ref<dfdx> A,\n//                    Eigen::Ref<dfdu> B) {\n//     return false;\n//   };\n// };\n//\n// TEST_F(RacerSuspensionTest, TestComputeGradComputation) {\n//   Eigen::Matrix<float, RacerSuspension::STATE_DIM, RacerSuspension::STATE_DIM +\n// RacerSuspension::CONTROL_DIM> numeric_jac; Eigen::Matrix<float, RacerSuspension::STATE_DIM,\n// RacerSuspension::STATE_DIM + RacerSuspension::CONTROL_DIM> analytic_jac; RacerSuspension::state_array\n// state; state << 1, 2, 3, 4; RacerSuspension::control_array control; control << 5;\n//\n//   auto analytic_grad_model = RacerSuspension();\n//\n//   RacerSuspension::dfdx A_analytic = RacerSuspension::dfdx::Zero();\n//   RacerSuspension::dfdu B_analytic = RacerSuspension::dfdu::Zero();\n//\n//   analytic_grad_model.computeGrad(state, control, A_analytic, B_analytic);\n//\n//   auto numerical_grad_model = LinearDummy();\n//\n//   std::shared_ptr<ModelWrapperDDP<LinearDummy>> ddp_model 
=\n// std::make_shared<ModelWrapperDDP<LinearDummy>>(&numerical_grad_model);\n//\n//   analytic_jac.leftCols<RacerSuspension::STATE_DIM>() = A_analytic;\n//   analytic_jac.rightCols<RacerSuspension::CONTROL_DIM>() = B_analytic;\n//   numeric_jac = ddp_model->df(state, control);\n//\n//   ASSERT_LT((numeric_jac - analytic_jac).norm(), 1e-3) << \"Numeric Jacobian\\n\" << numeric_jac << \"\\nAnalytic\n//   Jacobian\\n\"\n// << analytic_jac;\n// }\n//\n// */\n"
  },
  {
    "path": "tests/feedback_controllers/CMakeLists.txt",
    "content": "add_executable(generic_feedback_controller_test ${PROJECT_SOURCE_DIR}/tests/test_main.cpp generic_feedback_controller_test.cu)\ntarget_link_libraries(generic_feedback_controller_test gtest\n        gmock\n        gtest_main\n        cartpole_mppi)\ngtest_discover_tests(generic_feedback_controller_test)\nset_target_properties(generic_feedback_controller_test PROPERTIES FOLDER test)\n\nadd_executable(ddp_tests ${PROJECT_SOURCE_DIR}/tests/test_main.cpp ddp_test.cu)\ntarget_link_libraries(ddp_tests gtest\n        gmock\n        gtest_main\n        cartpole_mppi)\ngtest_discover_tests(ddp_tests)\nset_target_properties(ddp_tests PROPERTIES FOLDER test)\n"
  },
  {
    "path": "tests/feedback_controllers/ddp_test.cu",
    "content": "#include <gtest/gtest.h>\n// #include <mppi/ddp/ddp_model_wrapper.h>\n// #include <mppi/ddp/ddp_tracking_costs.h>\n// #include <mppi/ddp/ddp.h>\n#include <mppi/instantiations/cartpole_mppi/cartpole_mppi.cuh>\n#include <mppi/instantiations/quadrotor_mppi/quadrotor_mppi.cuh>\n\nclass ModelWrapper_Test : public testing::Test\n{\npublic:\n  CartpoleDynamics model = CartpoleDynamics(1, 1, 1);\n  std::shared_ptr<ModelWrapperDDP<CartpoleDynamics>> ddp_model =\n      std::make_shared<ModelWrapperDDP<CartpoleDynamics>>(&model);\n\n  CartpoleDynamics::state_array state;\n  CartpoleDynamics::control_array control;\n};\n\nTEST_F(ModelWrapper_Test, StateDerivative_1)\n{\n  CartpoleDynamics::state_array result;\n  CartpoleDynamics::state_array known_result;\n\n  state << 0, 0, 0, 0;\n  control << 0;\n  result = ddp_model->f(state, control);\n  known_result << 0, 0, 0, 0;\n\n  ASSERT_EQ(known_result, result);\n}\n\nTEST_F(ModelWrapper_Test, StateDerivative_2)\n{\n  CartpoleDynamics::state_array result;\n  CartpoleDynamics::state_array known_result;\n\n  state << 1, 2, 3, 4;\n  control << 5;\n  result = ddp_model->f(state, control);\n  model.computeStateDeriv(state, control, known_result);\n\n  ASSERT_EQ(known_result, result);\n}\n\nTEST_F(ModelWrapper_Test, Jacobian_1)\n{\n  CartpoleDynamics::Jacobian result;\n  CartpoleDynamics::Jacobian known_result;\n\n  CartpoleDynamics::dfdx A = CartpoleDynamics::dfdx::Zero();\n  CartpoleDynamics::dfdu B = CartpoleDynamics::dfdu::Zero();\n\n  state << 1, 2, 3, 4;\n  control << 5;\n\n  model.computeGrad(state, control, A, B);\n\n  known_result.leftCols<CartpoleDynamics::STATE_DIM>() = A;\n  known_result.rightCols<CartpoleDynamics::CONTROL_DIM>() = B;\n  result = ddp_model->df(state, control);\n\n  ASSERT_EQ(known_result, result);\n}\n\nclass TrackingCosts_Test : public testing::Test\n{\npublic:\n  typedef Eigen::Matrix<float, CartpoleDynamics::CONTROL_DIM, CartpoleDynamics::CONTROL_DIM> square_u_matrix;\n  typedef 
Eigen::Matrix<float, CartpoleDynamics::STATE_DIM + CartpoleDynamics::CONTROL_DIM,\n                        CartpoleDynamics::STATE_DIM + CartpoleDynamics::CONTROL_DIM>\n      square_xu_matrix;\n  CartpoleDynamics::dfdx Q;\n  square_u_matrix R;\n  square_xu_matrix QR;\n  int num_timesteps;\n\n  std::shared_ptr<TrackingCostDDP<ModelWrapperDDP<CartpoleDynamics>>> tracking_cost;\n  std::shared_ptr<TrackingTerminalCost<ModelWrapperDDP<CartpoleDynamics>>> terminal_cost;\n\n  CartpoleDynamics::state_array state;\n  CartpoleDynamics::control_array control;\n\nprotected:\n  void SetUp() override\n  {\n    Q = CartpoleDynamics::dfdx::Identity();\n    R = square_u_matrix::Identity();\n    QR = square_xu_matrix::Identity();\n    num_timesteps = 100;\n\n    tracking_cost = std::make_shared<TrackingCostDDP<ModelWrapperDDP<CartpoleDynamics>>>(Q, R, num_timesteps);\n    terminal_cost = std::make_shared<TrackingTerminalCost<ModelWrapperDDP<CartpoleDynamics>>>(Q);\n\n    state.resize(CartpoleDynamics::STATE_DIM, 1);\n    control.resize(CartpoleDynamics::CONTROL_DIM, 1);\n  }\n};\n\nTEST_F(TrackingCosts_Test, ComputeCost)\n{\n  state << 1, 2, 3, 4;\n  control << 5;\n  int timestep = 0;\n  ASSERT_FLOAT_EQ(1 + 4 + 9 + 16 + 25, tracking_cost->c(state, control, timestep));\n  ASSERT_FLOAT_EQ(1 + 4 + 9 + 16, terminal_cost->c(state));\n}\n\nTEST_F(TrackingCosts_Test, ComputeCostGradient)\n{\n  Eigen::Matrix<float, 5, 1> known_result;\n  state << 1, 2, 3, 4;\n  control << 5;\n  known_result.block(0, 0, 4, 1) = state;\n  known_result.block(4, 0, 1, 1) = control;\n  //  std::cout << known_result << std::endl;\n  int timestep = 0;\n  ASSERT_EQ(QR * known_result, tracking_cost->dc(state, control, timestep))\n      << \"Known:\\n\"\n      << QR * known_result << \"\\nTracking cost: \\n\"\n      << tracking_cost->dc(state, control, timestep);\n  ASSERT_EQ(Q * state, terminal_cost->dc(state)) << \"Known:\\n\"\n                                                 << Q * state << \"\\nTerminal cost: 
\\n\"\n                                                 << terminal_cost->dc(state);\n}\n\nTEST_F(TrackingCosts_Test, ComputeCostHessian)\n{\n  state << 1, 2, 3, 4;\n  control << 5;\n  int timestep = 0;\n  ASSERT_EQ(QR, tracking_cost->d2c(state, control, timestep));\n  ASSERT_EQ(Q, terminal_cost->d2c(state));\n}\n\nTEST(DDPSolver_Test, Cartpole_Tracking)\n{\n  using SAMPLER_T = mppi::sampling_distributions::GaussianDistribution<typename CartpoleDynamics::DYN_PARAMS_T>;\n  // SETUP THE MPPI CONTROLLER\n  CartpoleDynamics model = CartpoleDynamics(1.0, 1.0, 1.0);\n  CartpoleQuadraticCost cost;\n\n  CartpoleQuadraticCostParams new_params;\n  new_params.cart_position_coeff = 100;\n  new_params.pole_angle_coeff = 200;\n  new_params.cart_velocity_coeff = 10;\n  new_params.pole_angular_velocity_coeff = 20;\n  new_params.control_cost_coeff[0] = 1;\n  new_params.terminal_cost_coeff = 0;\n  new_params.desired_terminal_state[0] = 0;\n  new_params.desired_terminal_state[1] = 0;\n  new_params.desired_terminal_state[2] = M_PI;\n  new_params.desired_terminal_state[3] = 0;\n\n  cost.setParams(new_params);\n\n  float dt = 0.01;\n  int max_iter = 100;\n  float lambda = 0.25;\n  float alpha = 0.001;\n  const int num_timesteps = 100;\n  auto fb_controller = DDPFeedback<CartpoleDynamics, num_timesteps>(&model, dt);\n\n  auto sampler = SAMPLER_T();\n  auto sampler_params = sampler.getParams();\n  for (int i = 0; i < CartpoleDynamics::CONTROL_DIM; i++)\n  {\n    sampler_params.std_dev[i] = 5.0;\n  }\n  sampler.setParams(sampler_params);\n\n  DDPParams<CartpoleDynamics> fb_params;\n  fb_params.Q = 100 *\n                CartpoleDynamics::dfdx::\n                    Identity();  // Eigen::MatrixXf::Identity(CartpoleDynamics::STATE_DIM,CartpoleDynamics::STATE_DIM);\n  fb_params.R = TrackingCosts_Test::square_u_matrix::\n      Identity();  // Eigen::MatrixXf::Identity(CartpoleDynamics::CONTROL_DIM,CartpoleDynamics::CONTROL_DIM);\n  fb_params.Q_f = fb_params.Q;\n  fb_params.num_iterations = 
20;\n  fb_controller.setParams(fb_params);\n  fb_controller.initTrackingController();\n\n  auto controller = VanillaMPPIController<CartpoleDynamics, CartpoleQuadraticCost,\n                                          DDPFeedback<CartpoleDynamics, num_timesteps>, num_timesteps, 2048, SAMPLER_T>(\n      &model, &cost, &fb_controller, &sampler, dt, max_iter, lambda, alpha);\n\n  auto controller_params = controller.getParams();\n  controller_params.dynamics_rollout_dim_ = dim3(64, 8, 1);\n  controller_params.cost_rollout_dim_ = dim3(num_timesteps, 1, 1);\n  controller.setParams(controller_params);\n  CartpoleDynamics::state_array current_state = model.getZeroState();\n  // Compute the control\n  controller.computeControl(current_state, 0);\n  auto nominal_state = controller.getTargetStateSeq();\n  auto nominal_control = controller.getControlSeq();\n  // END MPPI CONTROLLER\n\n  // util::DefaultLogger logger;\n  // bool verbose = false;\n  // int num_iterations = 20;\n\n  // Eigen::MatrixXf Q;\n  // Eigen::MatrixXf R;\n  // Eigen::MatrixXf QR;\n  // Q = 100*Eigen::MatrixXf::Identity(CartpoleDynamics::STATE_DIM,CartpoleDynamics::STATE_DIM);\n  // R = Eigen::MatrixXf::Identity(CartpoleDynamics::CONTROL_DIM,CartpoleDynamics::CONTROL_DIM);\n  // QR = Eigen::MatrixXf(CartpoleDynamics::STATE_DIM+ CartpoleDynamics::CONTROL_DIM,\n  //                      CartpoleDynamics::STATE_DIM + CartpoleDynamics::CONTROL_DIM);\n  // QR.template topLeftCorner<CartpoleDynamics::STATE_DIM, CartpoleDynamics::STATE_DIM>() = Q;\n  // QR.template bottomRightCorner<CartpoleDynamics::CONTROL_DIM, CartpoleDynamics::CONTROL_DIM>() = R;\n\n  // std::shared_ptr<ModelWrapperDDP<CartpoleDynamics>> ddp_model =\n  // std::make_shared<ModelWrapperDDP<CartpoleDynamics>>(&model);\n\n  // std::shared_ptr<DDP<ModelWrapperDDP<CartpoleDynamics>>> ddp_solver_ =\n  //         std::make_shared<DDP<ModelWrapperDDP<CartpoleDynamics>>>(dt, num_timesteps, num_iterations, &logger,\n  //         verbose);\n\n  // 
std::shared_ptr<TrackingCostDDP<ModelWrapperDDP<CartpoleDynamics>>> tracking_cost =\n  //         std::make_shared<TrackingCostDDP<ModelWrapperDDP<CartpoleDynamics>>>(Q,R,num_timesteps);\n\n  // std::shared_ptr<TrackingTerminalCost<ModelWrapperDDP<CartpoleDynamics>>> terminal_cost =\n  //         std::make_shared<TrackingTerminalCost<ModelWrapperDDP<CartpoleDynamics>>>(Q);\n\n  // tracking_cost->setTargets(nominal_state.data(), nominal_control.data(), num_timesteps);\n  // terminal_cost->xf = tracking_cost->traj_target_x_.col(num_timesteps - 1);\n\n  Eigen::Matrix<float, CartpoleDynamics::STATE_DIM, 1> s;\n  s << 0.0, 0.0, 0.0, 0.0;\n\n  Eigen::MatrixXf control_traj = Eigen::MatrixXf::Zero(CartpoleDynamics::CONTROL_DIM, num_timesteps);\n\n  fb_controller.computeFeedback(s, nominal_state, nominal_control);\n\n  // OptimizerResult<ModelWrapperDDP<CartpoleDynamics>> result_ = ddp_solver_->run(s, control_traj,\n  //                                                                    *ddp_model, *tracking_cost, *terminal_cost);\n\n  //  std::cout << result_.state_trajectory << std::endl;\n\n  for (int i = 0; i < num_timesteps; ++i)\n  {\n    ASSERT_NEAR((nominal_state.col(i) - fb_controller.result_.state_trajectory.col(i)).norm(), 0.0f, 1e-2)\n        << \"Failed on timestep: \" << i;\n  }\n}\n\nTEST(DDPSolver_Test, Quadrotor_Tracking)\n{\n  const int num_timesteps = 500;\n\n  using DYN = QuadrotorDynamics;\n  using COST = QuadrotorQuadraticCost;\n  using CONTROLLER = VanillaMPPIController<DYN, COST, DDPFeedback<DYN, num_timesteps>, num_timesteps, 2048>;\n\n  std::array<float2, DYN::CONTROL_DIM> control_ranges;\n  for (int i = 0; i < 3; i++)\n  {\n    control_ranges[i] = make_float2(-2.5, 2.5);\n  }\n  control_ranges[3] = make_float2(0, 36);\n  DYN model(control_ranges);\n\n  DYN::state_array x_goal;\n  x_goal << 6, 4, 3,               // position\n      0, 0, 0,                     // velocity\n      0.7071068, 0, 0, 0.7071068,  // quaternion\n      0, 0, 0;             
        // angular speed\n\n  float dt = 0.01;\n\n  // Create DDP and find feedback gains\n  util::DefaultLogger logger;\n  // bool verbose = false;\n\n  // CONTROLLER::StateCostWeight Q;\n  // CONTROLLER::ControlCostWeight R;\n  // CONTROLLER::Hessian Qf;\n  DDPParams<DYN> fb_params;\n  fb_params.Q = DDPParams<DYN>::StateCostWeight::Identity();\n  fb_params.R = DDPParams<DYN>::ControlCostWeight::Identity();\n  fb_params.Q_f = DDPParams<DYN>::Hessian::Identity();\n  fb_params.Q.diagonal() << 25, 25, 300, 15, 15, 300, 0, 0, 0, 0, 30, 30, 30;\n  fb_params.Q_f.diagonal() << 250, 250, 3000, 150, 150, 3000, 0, 0, 0, 0, 300, 300, 300;\n  fb_params.R.diagonal() << 550, 550, 550, 1;\n  fb_params.num_iterations = 100;\n\n  Eigen::MatrixXf control_traj = CONTROLLER::control_trajectory::Zero();\n  CONTROLLER::state_trajectory ddp_state_traj = CONTROLLER::state_trajectory::Zero();\n  for (int i = 0; i < num_timesteps; i++)\n  {\n    ddp_state_traj.col(i) = x_goal;\n    control_traj.col(i) = model.zero_control_;\n  }\n\n  auto fb_controller = DDPFeedback<DYN, num_timesteps>(&model, dt);\n  fb_controller.setParams(fb_params);\n  fb_controller.initTrackingController();\n\n  // auto ddp_model = std::make_shared<ModelWrapperDDP<DYN>>(&model);\n\n  // auto ddp_solver = std::make_shared<DDP<ModelWrapperDDP<DYN>>>(dt,\n  //                                                               num_timesteps,\n  //                                                               num_iterations,\n  //                                                               &logger,\n  //                                                               verbose);\n\n  // auto tracking_cost =\n  //     std::make_shared<TrackingCostDDP<ModelWrapperDDP<DYN>>>(Q, R, num_timesteps);\n\n  // auto terminal_cost =\n  //     std::make_shared<TrackingTerminalCost<ModelWrapperDDP<DYN>>>(Qf);\n\n  // tracking_cost->setTargets(ddp_state_traj.data(), control_traj.data(),\n  //                           num_timesteps);\n  // 
terminal_cost->xf = tracking_cost->traj_target_x_.col(num_timesteps - 1);\n\n  DYN::state_array x_real;\n  x_real << 0, -0.5, 0,  // position\n      0, 0.5, 0,         // velocity\n      1, 0, 0, 0,        // quaternion\n      0, 0, 0;           // angular speed\n\n  // DYN::control_array control_min, control_max;\n  // for (int i = 0; i < DYN::CONTROL_DIM; i++) {\n  //   control_min(i) = control_ranges[i].x;\n  //   control_max(i) = control_ranges[i].y;\n  // }\n\n  std::cout << \"Starting DDP\" << std::endl;\n  fb_controller.computeFeedback(x_real, ddp_state_traj, control_traj);\n  // OptimizerResult<ModelWrapperDDP<DYN>> result_ = ddp_solver->run(x_real,\n  //                                                                 control_traj,\n  //                                                                 *ddp_model,\n  //                                                                 *tracking_cost,\n  //                                                                 *terminal_cost,\n  //                                                                 control_min,\n  //                                                                 control_max);\n  auto control_feedback_gains = fb_controller.result_.feedback_gain;\n  std::cout << \"DDP Optimal State Sequence:\\n\" << fb_controller.result_.state_trajectory.transpose() << std::endl;\n  DYN::state_array x_deriv, x, x_nom;\n  x = x_real;\n  DYN::control_array u_total, fb_u;\n  for (int t = 0; t < num_timesteps; ++t)\n  {\n    x_nom = fb_controller.result_.state_trajectory.col(t);\n    fb_u = fb_controller.k(x, x_nom, t);\n    u_total = fb_u;\n    model.enforceConstraints(x, u_total);\n    std::cout << \" t = \" << t * dt << \", State_diff norm: \" << (x - x_goal).norm()\n              << \", Control:  \" << u_total.transpose() << std::endl;\n    std::cout << \"Feedback matrix:\\n\" << control_feedback_gains[t] << std::endl;\n    model.computeStateDeriv(x, u_total, x_deriv);\n    model.updateState(x, x_deriv, 
dt);\n  }\n  std::cout << std::fixed << std::setprecision(5) << \"X_goal: \" << x_goal.transpose() << \"\\nX_end: \" << x.transpose()\n            << \"\\ndiff in state: \" << (x - x_goal).norm() << std::endl;\n  EXPECT_LE((x - x_goal).norm(), 3);\n}\n"
  },
  {
    "path": "tests/feedback_controllers/generic_feedback_controller_test.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi_test/mock_classes/mock_dynamics.h>\n#include <mppi/feedback_controllers/feedback.cuh>\n\ntemplate <int STATE_DIM = 1, int CONTROL_DIM = 1, int OUTPUT_DIM = 1>\nstruct DynamicsTesterParams : public DynamicsParams\n{\n  enum class StateIndex : int\n  {\n    NUM_STATES = STATE_DIM\n  };\n  enum class ControlIndex : int\n  {\n    NUM_CONTROLS = CONTROL_DIM\n  };\n  enum class OutputIndex : int\n  {\n    NUM_OUTPUTS = OUTPUT_DIM\n  };\n  int var_1 = 1;\n  int var_2 = 2;\n  float4 var_4;\n};\n\nstruct FeedbackParams\n{\n  int var_10 = 10;\n  int var_20 = 20;\n  float var_30 = 3.14;\n};\n\ntemplate <int STATE_DIM = 1, int CONTROL_DIM = 1>\nclass DynamicsTester\n  : public MPPI_internal::Dynamics<DynamicsTester<STATE_DIM, CONTROL_DIM>, DynamicsTesterParams<STATE_DIM, CONTROL_DIM>>\n{\npublic:\n  using PARENT_CLASS =\n      MPPI_internal::Dynamics<DynamicsTester<STATE_DIM, CONTROL_DIM>, DynamicsTesterParams<STATE_DIM, CONTROL_DIM>>;\n  using state_array = typename PARENT_CLASS::state_array;\n  using control_array = typename PARENT_CLASS::control_array;\n\n  DynamicsTester(cudaStream_t stream = 0) : PARENT_CLASS(stream)\n  {\n  }\n\n  DynamicsTester(std::array<float2, CONTROL_DIM> control_rngs, cudaStream_t stream = 0)\n    : PARENT_CLASS(control_rngs, stream)\n  {\n  }\n\n  void computeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                       Eigen::Ref<state_array> state_der)\n  {\n    state_der(1) = control(0);\n  }\n\n  void computeKinematics(const Eigen::Ref<const state_array>& state, Eigen::Ref<state_array> s_der)\n  {\n    s_der(0) = state(0) + state(1);\n  };\n\n  // TODO must be properly parallelized\n  __device__ void computeDynamics(float* state, float* control, float* state_der, float* theta_s = nullptr)\n  {\n    state_der[1] = control[0];\n  }\n\n  // TODO must be properly parallelized\n  __device__ void computeKinematics(float* state, 
float* state_der)\n  {\n    state_der[0] = state[0] + state[1];\n  }\n};\n\nstruct TestGPUState : GPUState\n{\n  int testing = 5;\n};\n\nclass TestGPUFeedbackController\n  : public GPUFeedbackController<TestGPUFeedbackController, DynamicsTester<>, TestGPUState>\n{\npublic:\n  typedef MockDynamics DYN_T;\n  typedef GPUFeedbackController<TestGPUFeedbackController, DynamicsTester<>, TestGPUState> PARENT_CLASS;\n\n  TestGPUFeedbackController(cudaStream_t stream = 0) : PARENT_CLASS(stream)\n  {\n  }\n\n  void allocateCudaMemory()\n  {\n  }\n\n  void deallocateCUDAMemory()\n  {\n  }\n\n  void copyToDevice()\n  {\n  }\n\n  void copyFromDevice()\n  {\n  }\n};\n\nclass TestFeedbackController : public FeedbackController<TestGPUFeedbackController, FeedbackParams, 10>\n{\npublic:\n  typedef FeedbackController<TestGPUFeedbackController, FeedbackParams, 10> PARENT_CLASS;\n  using INTERNAL_STATE_T = typename PARENT_CLASS::TEMPLATED_FEEDBACK_STATE;\n\n  TestFeedbackController(float dt = 0.01, int num_timesteps = 10, cudaStream_t stream = 0)\n    : PARENT_CLASS(dt, num_timesteps, stream)\n  {\n  }\n\n  control_array k_(const Eigen::Ref<const state_array>& x_act, const Eigen::Ref<const state_array>& x_goal, int t,\n                   INTERNAL_STATE_T& fb_state) override\n  {\n  }\n\n  // might not be a needed method\n  void computeFeedback(const Eigen::Ref<const state_array>& init_state,\n                       const Eigen::Ref<const state_trajectory>& goal_traj,\n                       const Eigen::Ref<const control_trajectory>& control_traj) override\n  {\n  }\n  void initTrackingController() override\n  {\n  }\n};\n\nTEST(FeedbackController, Constructor)\n{\n  cudaStream_t stream;\n\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  TestFeedbackController feedbackController(0.01, 10, stream);\n  feedbackController.GPUSetup();\n\n  EXPECT_EQ(feedbackController.getHostPointer()->stream_, stream) << \"Stream binding failure.\";\n  
EXPECT_EQ(feedbackController.getHostPointer()->GPUMemStatus_, true) << \"GPU not set up\";\n  EXPECT_NE(feedbackController.getDevicePointer(), nullptr) << \"GPU is not in device pointer\";\n\n  HANDLE_ERROR(cudaStreamDestroy(stream));\n}\n\nTEST(FeedbackController, test)\n{\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/core/normexp_kernel_test.cu",
    "content": "#include \"normexp_kernel_test.cuh\"\n\ntemplate <int NUM_ROLLOUTS>\nvoid launchNormExp_KernelTest(std::array<float, NUM_ROLLOUTS>& trajectory_costs_host, float gamma, float baseline,\n                              std::array<float, NUM_ROLLOUTS>& normalized_compute)\n{\n  // Allocate CUDA memory\n  float* trajectory_costs_d;\n  HANDLE_ERROR(cudaMalloc((void**)&trajectory_costs_d, sizeof(float) * trajectory_costs_host.size()))\n\n  HANDLE_ERROR(cudaMemcpy(trajectory_costs_d, trajectory_costs_host.data(),\n                          sizeof(float) * trajectory_costs_host.size(), cudaMemcpyHostToDevice))\n\n  mppi::kernels::normExpKernel<<<1, NUM_ROLLOUTS>>>(NUM_ROLLOUTS, trajectory_costs_d, gamma, baseline);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(normalized_compute.data(), trajectory_costs_d, sizeof(float) * trajectory_costs_host.size(),\n                          cudaMemcpyDeviceToHost))\n\n  cudaFree(trajectory_costs_d);\n}\n\ntemplate <int NUM_ROLLOUTS, int BLOCKSIZE_X>\nvoid launchGenericNormExpKernelTest(std::array<float, NUM_ROLLOUTS> trajectory_costs_host, float gamma, float baseline,\n                                    std::array<float, NUM_ROLLOUTS>& normalized_compute)\n{\n  // Allocate CUDA memory\n  float* trajectory_costs_d;\n  HANDLE_ERROR(cudaMalloc((void**)&trajectory_costs_d, sizeof(float) * trajectory_costs_host.size()));\n\n  HANDLE_ERROR(cudaMemcpy(trajectory_costs_d, trajectory_costs_host.data(),\n                          sizeof(float) * trajectory_costs_host.size(), cudaMemcpyHostToDevice));\n\n  dim3 dimBlock(BLOCKSIZE_X, 1, 1);\n  dim3 dimGrid((NUM_ROLLOUTS - 1) / BLOCKSIZE_X + 1, 1, 1);\n\n  mppi::kernels::normExpKernel<<<dimGrid, dimBlock>>>(NUM_ROLLOUTS, trajectory_costs_d, gamma, baseline);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(normalized_compute.data(), trajectory_costs_d, sizeof(float) * trajectory_costs_host.size(),\n                          cudaMemcpyDeviceToHost));\n\n  
cudaFree(trajectory_costs_d);\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/core/normexp_kernel_test.cuh",
    "content": "#pragma once\n\n#ifndef MPPI_GENERIC_KERNEL_TESTS_MPPI_CORE_NORMEXP_KERNEL_TEST_CUH_\n#define MPPI_GENERIC_KERNEL_TESTS_MPPI_CORE_NORMEXP_KERNEL_TEST_CUH_\n\n#include <mppi/core/mppi_common.cuh>\n#include <array>\n\ntemplate <int NUM_ROLLOUTS>\nvoid launchNormExp_KernelTest(std::array<float, NUM_ROLLOUTS>& trajectory_costs_host, float gamma, float baseline,\n                              std::array<float, NUM_ROLLOUTS>& normalized_compute);\n\ntemplate <int NUM_ROLLOUTS, int BLOCKSIZE_X>\nvoid launchGenericNormExpKernelTest(std::array<float, NUM_ROLLOUTS> trajectory_costs_host, float gamma, float baseline,\n                                    std::array<float, NUM_ROLLOUTS>& normalized_compute);\n\ntemplate <int NUM_ROLLOUTS, int BLOCKSIZE_X, int BLOCKSIZE_Y>\nvoid launchAutorallyNormExpKernelTest(std::array<float, NUM_ROLLOUTS> trajectory_costs_host, float gamma,\n                                      float baseline, std::array<float, NUM_ROLLOUTS>& normalized_compute);\n\n#if __CUDACC__\n#include \"normexp_kernel_test.cu\"\n#endif\n\n#endif  //! MPPI_GENERIC_KERNEL_TESTS_MPPI_CORE_NORMEXP_KERNEL_TEST_CUH_\n"
  },
  {
    "path": "tests/include/kernel_tests/core/rmppi_kernel_test.cu",
    "content": "//\n// Created by mgandhi on 5/23/20.\n//\n\n#include \"rmppi_kernel_test.cuh\"\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T, class FB_T>\nvoid launchCPURMPPIRolloutKernel(DYN_T* model, COST_T* cost, SAMPLER_T* sampler, FB_T* fb_controller, const float dt,\n                                 const int num_timesteps, const int num_rollouts, const float lambda, const float alpha,\n                                 const float value_func_threshold, const int nominal_idx, const int real_idx,\n                                 const Eigen::Ref<const typename DYN_T::state_array>& initial_real_state,\n                                 const Eigen::Ref<const typename DYN_T::state_array>& initial_nominal_state,\n                                 Eigen::Ref<Eigen::MatrixXf> trajectory_costs)\n{\n  using state_array = typename DYN_T::state_array;\n  using output_array = typename DYN_T::output_array;\n  using control_array = typename DYN_T::control_array;\n  Eigen::MatrixXf control_noise_real = Eigen::MatrixXf::Zero(DYN_T::CONTROL_DIM, num_rollouts * num_timesteps);\n  Eigen::MatrixXf control_noise_nom = Eigen::MatrixXf::Zero(DYN_T::CONTROL_DIM, num_rollouts * num_timesteps);\n  Eigen::MatrixXi crash_status_real = Eigen::MatrixXi::Zero(num_rollouts, 1);\n  Eigen::MatrixXi crash_status_nom = Eigen::MatrixXi::Zero(num_rollouts, 1);\n  HANDLE_ERROR(cudaMemcpy(control_noise_nom.data(), sampler->getControlSample(0, 0, nominal_idx),\n                          sizeof(float) * DYN_T::CONTROL_DIM * num_rollouts * num_timesteps, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(control_noise_real.data(), sampler->getControlSample(0, 0, real_idx),\n                          sizeof(float) * DYN_T::CONTROL_DIM * num_rollouts * num_timesteps, cudaMemcpyDeviceToHost));\n\n  state_array curr_real_state, curr_nom_state, next_real_state, next_nom_state, state_der_real, state_der_nom;\n  output_array output_real, output_nom;\n  control_array u_real, u_nom, u_feedback;\n  
auto sampler_params = sampler->getParams();\n  for (int sample_idx = 0; sample_idx < num_rollouts; sample_idx++)\n  {\n    curr_real_state = initial_real_state;\n    curr_nom_state = initial_nominal_state;\n    float running_real_cost = 0;\n    float running_nom_cost = 0;\n    float tracking_nom_cost = 0;\n    float tracking_real_cost = 0;\n    model->initializeDynamics(curr_real_state, u_real, output_real, 0, dt);\n    cost->initializeCosts(output_real, u_real, 0, dt);\n    for (int t = 0; t < num_timesteps; t++)\n    {\n      u_real = control_noise_real.col(sample_idx * num_timesteps + t);\n      u_nom = control_noise_nom.col(sample_idx * num_timesteps + t);\n      u_feedback = fb_controller->k(curr_real_state, curr_nom_state, t);\n      u_real += u_feedback;\n      model->enforceConstraints(curr_real_state, u_real);\n      model->enforceConstraints(curr_nom_state, u_nom);\n      model->step(curr_real_state, next_real_state, state_der_real, u_real, output_real, t, dt);\n      model->step(curr_nom_state, next_nom_state, state_der_nom, u_nom, output_nom, t, dt);\n      float real_cost = cost->computeRunningCost(output_real, u_real, t, &crash_status_real(sample_idx));\n      float nom_cost = cost->computeRunningCost(output_nom, u_nom, t, &crash_status_nom(sample_idx));\n      tracking_real_cost += real_cost + sampler->computeFeedbackCost(u_feedback.data(), (float*)&sampler_params, t,\n                                                                     real_idx, lambda, alpha);\n      running_real_cost += real_cost + sampler->computeLikelihoodRatioCost(u_real, t, real_idx, lambda, alpha);\n      running_nom_cost += nom_cost;\n      tracking_nom_cost += sampler->computeLikelihoodRatioCost(u_nom, t, nominal_idx, lambda, alpha);\n\n      curr_real_state = next_real_state;\n      curr_nom_state = next_nom_state;\n    }\n    running_real_cost += cost->terminalCost(output_real);\n    tracking_real_cost += cost->terminalCost(output_real);\n    running_nom_cost += 
cost->terminalCost(output_nom);\n\n    tracking_nom_cost /= num_timesteps;\n    tracking_real_cost /= num_timesteps;\n    running_nom_cost /= num_timesteps;\n    running_real_cost /= num_timesteps;\n\n    running_nom_cost =\n        0.5f * running_nom_cost + 0.5f * fmaxf(fminf(tracking_real_cost, value_func_threshold), running_nom_cost);\n    running_nom_cost += tracking_nom_cost;\n    trajectory_costs(sample_idx + nominal_idx * num_rollouts, 0) = running_nom_cost;\n    trajectory_costs(sample_idx + real_idx * num_rollouts, 0) = running_real_cost;\n  }\n}\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T>\nvoid launchCPUInitEvalKernel(DYN_T* model, COST_T* cost, SAMPLER_T* sampler, const float dt, const int num_timesteps,\n                             const int num_candidates, const int num_samples, const float lambda, const float alpha,\n                             const Eigen::Ref<const Eigen::MatrixXf>& candidates,\n                             const Eigen::Ref<const Eigen::MatrixXi>& strides,\n                             Eigen::Ref<Eigen::MatrixXf> trajectory_costs)\n{\n  using state_array = typename DYN_T::state_array;\n  using output_array = typename DYN_T::output_array;\n  using control_array = typename DYN_T::control_array;\n  const int num_rollouts = num_candidates * num_samples;\n  Eigen::MatrixXi crash_status = Eigen::MatrixXi::Zero(num_rollouts, 1);\n\n  // Get control samples from sampler\n  Eigen::MatrixXf control_noise = Eigen::MatrixXf::Zero(DYN_T::CONTROL_DIM, num_rollouts * num_timesteps);\n  HANDLE_ERROR(cudaMemcpy(control_noise.data(), sampler->getControlSample(0, 0, 0),\n                          sizeof(float) * DYN_T::CONTROL_DIM * num_rollouts * num_timesteps, cudaMemcpyDeviceToHost));\n\n  state_array curr_state, next_state, state_der;\n  output_array output;\n  control_array u;\n  for (int candidate_idx = 0; candidate_idx < num_candidates; candidate_idx++)\n  {\n    for (int sample_idx = 0; sample_idx < num_samples; sample_idx++)\n  
  {\n      int global_idx = candidate_idx * num_samples + sample_idx;\n      float& running_cost = trajectory_costs(global_idx, 0);\n      curr_state = candidates.col(candidate_idx);\n      model->initializeDynamics(curr_state, u, output, 0, dt);\n      cost->initializeCosts(output, u, 0, dt);\n      // sampler->initializeDistributions();\n      for (int t = 0; t < num_timesteps; t++)\n      {\n        int control_index = sample_idx * num_timesteps + min(t + strides(candidate_idx, 0), num_timesteps - 1);\n        u = control_noise.col(control_index);\n        model->enforceConstraints(curr_state, u);\n        model->step(curr_state, next_state, state_der, u, output, t, dt);\n        // if (t > 0)\n        // {\n        running_cost += cost->computeRunningCost(output, u, t, &crash_status(global_idx));\n        running_cost += sampler->computeLikelihoodRatioCost(u, t, 0, lambda, alpha);\n        // }\n        curr_state = next_state;\n      }\n      running_cost += cost->terminalCost(output);\n      running_cost /= (num_timesteps);\n    }\n  }\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/core/rmppi_kernel_test.cuh",
    "content": "//\n// Created by mgandhi on 5/23/20.\n//\n\n#ifndef MPPIGENERIC_RMPPI_KERNEL_TEST_CUH\n#define MPPIGENERIC_RMPPI_KERNEL_TEST_CUH\n\n#include <mppi/core/rmppi_kernels.cuh>\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T, class FB_T>\nvoid launchCPURMPPIRolloutKernel(DYN_T* model, COST_T* cost, SAMPLER_T* sampler, FB_T* fb_controller, const float dt,\n                                 const int num_timesteps, const int num_rollouts, const float lambda, const float alpha,\n                                 const float value_func_threshold, const int nominal_idx, const int real_idx,\n                                 const Eigen::Ref<const typename DYN_T::state_array>& initial_real_state,\n                                 const Eigen::Ref<const typename DYN_T::state_array>& initial_nominal_state,\n                                 Eigen::Ref<Eigen::MatrixXf> trajectory_costs);\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T>\nvoid launchCPUInitEvalKernel(DYN_T* model, COST_T* cost, SAMPLER_T* sampler, const float dt, const int num_timesteps,\n                             const int num_candidates, const int num_samples, const float lambda, const float alpha,\n                             const Eigen::Ref<const Eigen::MatrixXf>& candidates,\n                             const Eigen::Ref<const Eigen::MatrixXi>& strides,\n                             Eigen::Ref<Eigen::MatrixXf> trajectory_costs);\n\n#if __CUDACC__\n#include \"rmppi_kernel_test.cu\"\n#endif\n\n#endif  // MPPIGENERIC_RMPPI_KERNEL_TEST_CUH\n"
  },
  {
    "path": "tests/include/kernel_tests/core/rollout_kernel_test.cu",
    "content": "#include \"rollout_kernel_test.cuh\"\n\n#include <mppi/dynamics/cartpole/cartpole_dynamics.cuh>\n#include <mppi/cost_functions/cartpole/cartpole_quadratic_cost.cuh>\n\n// const int STATE_DIM = 12;\n// const int CONTROL_DIM = 3;\n// const int NUM_ROLLOUTS = 100; // .99 times this number has to be an integer... TODO fix how brittle this is\n// const int BLOCKSIZE_X = 32;\n// const int BLOCKSIZE_Y = 8; // Blocksize_y has to be greater than the control dim TODO fix how we step through the\n// controls\n\ntemplate <int BLOCKSIZE_Z>\n__global__ void loadGlobalToShared_KernelTest(float* x0_device, float* x_thread_device, float* xdot_thread_device,\n                                              float* u_thread_device)\n{\n  const int STATE_DIM = 12;\n  const int CONTROL_DIM = 3;\n  const int NUM_ROLLOUTS = 100;  // .99 times this number has to be an integer... TODO fix how brittle this is\n  const int BLOCKSIZE_X = 32;\n  const int BLOCKSIZE_Y =\n      8;  // Blocksize_y has to be greater than the control dim TODO fix how we step through the controls\n\n  int thread_idx = threadIdx.x;\n  int thread_idy = threadIdx.y;\n  int thread_idz = threadIdx.z;\n  int block_idx = blockIdx.x;\n  int global_idx = threadIdx.x + block_idx * blockDim.x;\n\n  // Create shared arrays which hold state and control data\n  __shared__ float x_shared[BLOCKSIZE_X * STATE_DIM * BLOCKSIZE_Z];\n  __shared__ float xdot_shared[BLOCKSIZE_X * STATE_DIM * BLOCKSIZE_Z];\n  __shared__ float u_shared[BLOCKSIZE_X * CONTROL_DIM * BLOCKSIZE_Z];\n\n  float* x_thread;\n  float* xdot_thread;\n\n  float* u_thread;\n  float* du_thread;\n\n  if (global_idx < NUM_ROLLOUTS)\n  {\n    x_thread = &x_shared[(blockDim.x * thread_idz + thread_idx) * STATE_DIM];\n    xdot_thread = &xdot_shared[(blockDim.x * thread_idz + thread_idx) * STATE_DIM];\n    u_thread = &u_shared[(blockDim.x * thread_idz + thread_idx) * CONTROL_DIM];\n  }\n  __syncthreads();\n  mppi::kernels::loadGlobalToShared<STATE_DIM, 
CONTROL_DIM>(NUM_ROLLOUTS, BLOCKSIZE_Y, global_idx, thread_idy,\n                                                            thread_idz, x0_device, x_thread, xdot_thread, u_thread);\n  __syncthreads();\n\n  // Check if on the first rollout the correct values were copied over\n  // Prevent y threads from all writing to the same memory\n  if (global_idx == 1 && thread_idy == 0)\n  {\n    for (int i = 0; i < STATE_DIM; ++i)\n    {\n      int ind = i + thread_idz * STATE_DIM;\n      int ind_thread = i + thread_idz * STATE_DIM * blockDim.x;\n      x_thread_device[ind] = x_shared[ind_thread];\n      xdot_thread_device[ind] = xdot_shared[ind_thread];\n    }\n\n    for (int i = 0; i < CONTROL_DIM; ++i)\n    {\n      int ind = i + thread_idz * CONTROL_DIM;\n      int ind_thread = i + thread_idz * CONTROL_DIM * blockDim.x;\n      u_thread_device[ind] = u_shared[ind_thread];\n    }\n    __syncthreads();\n  }\n\n  // To test what the results are, we have to return them back to the host.\n}\n\nvoid launchGlobalToShared_KernelTest(const std::vector<float>& x0_host, std::vector<float>& x_thread_host,\n                                     std::vector<float>& xdot_thread_host, std::vector<float>& u_thread_host)\n{\n  const int STATE_DIM = 12;\n  const int CONTROL_DIM = 3;\n  const int NUM_ROLLOUTS = 100;  // .99 times this number has to be an integer... 
TODO fix how brittle this is\n  const int BLOCKSIZE_X = 32;\n  const int BLOCKSIZE_Y =\n      8;  // Blocksize_y has to be greater than the control dim TODO fix how we step through the controls\n\n  // Define the initial condition x0_device and the exploration variance in global device memory\n  float* x0_device;\n  HANDLE_ERROR(cudaMalloc((void**)&x0_device, sizeof(float) * STATE_DIM));\n  HANDLE_ERROR(cudaMemcpy(x0_device, x0_host.data(), sizeof(float) * STATE_DIM, cudaMemcpyHostToDevice));\n\n  // Define the return arguments in global device memory\n  float* x_thread_device;\n  float* xdot_thread_device;\n  float* u_thread_device;\n\n  HANDLE_ERROR(cudaMalloc((void**)&x_thread_device, sizeof(float) * STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&xdot_thread_device, sizeof(float) * STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&u_thread_device, sizeof(float) * CONTROL_DIM));\n\n  dim3 dimBlock(BLOCKSIZE_X, BLOCKSIZE_Y);\n  dim3 dimGrid(2048);\n\n  loadGlobalToShared_KernelTest<<<dimGrid, dimBlock>>>(x0_device, x_thread_device, xdot_thread_device, u_thread_device);\n  CudaCheckError();\n\n  // Copy the data back to the host\n  HANDLE_ERROR(cudaMemcpy(x_thread_host.data(), x_thread_device, sizeof(float) * STATE_DIM, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(\n      cudaMemcpy(xdot_thread_host.data(), xdot_thread_device, sizeof(float) * STATE_DIM, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(u_thread_host.data(), u_thread_device, sizeof(float) * CONTROL_DIM, cudaMemcpyDeviceToHost));\n\n  // Free the cuda memory that we allocated\n  cudaFree(x0_device);\n\n  cudaFree(x_thread_device);\n  cudaFree(xdot_thread_device);\n  cudaFree(u_thread_device);\n}\n\n/**\n * Tube-MPPI versions of the kernel tests\n */\n\n// This is to test tube-mppi calls to the kernel\nvoid launchGlobalToShared_KernelTest_nom_act(\n    const std::vector<float>& x0_host_act, std::vector<float>& x_thread_host_act,\n    std::vector<float>& xdot_thread_host_act, std::vector<float>& 
u_thread_host_act,\n    const std::vector<float>& x0_host_nom, std::vector<float>& x_thread_host_nom,\n    std::vector<float>& xdot_thread_host_nom, std::vector<float>& u_thread_host_nom)\n{\n  const int STATE_DIM = 12;\n  const int CONTROL_DIM = 3;\n  const int NUM_ROLLOUTS = 100;  // .99 times this number has to be an integer... TODO fix how brittle this is\n  const int BLOCKSIZE_X = 32;\n  const int BLOCKSIZE_Y =\n      8;  // Blocksize_y has to be greater than the control dim TODO fix how we step through the controls\n\n  // Define the initial condition x0_device and the exploration variance in global device memory\n  // Need twice as much memory for tube-mppi\n  float* x0_device;\n  HANDLE_ERROR(cudaMalloc((void**)&x0_device, sizeof(float) * STATE_DIM * 2));\n\n  // Copy both act and nominal initial state\n  HANDLE_ERROR(cudaMemcpy(x0_device, x0_host_act.data(), sizeof(float) * STATE_DIM, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(\n      cudaMemcpy(x0_device + STATE_DIM, x0_host_nom.data(), sizeof(float) * STATE_DIM, cudaMemcpyHostToDevice));\n\n  // Define the return arguments in global device memory\n  float* x_thread_device;\n  float* xdot_thread_device;\n  float* u_thread_device;\n\n  HANDLE_ERROR(cudaMalloc((void**)&x_thread_device, sizeof(float) * STATE_DIM * 2));\n  HANDLE_ERROR(cudaMalloc((void**)&xdot_thread_device, sizeof(float) * STATE_DIM * 2));\n  HANDLE_ERROR(cudaMalloc((void**)&u_thread_device, sizeof(float) * CONTROL_DIM * 2));\n\n  dim3 dimBlock(BLOCKSIZE_X, BLOCKSIZE_Y, 2);\n  dim3 dimGrid(100);\n\n  loadGlobalToShared_KernelTest<2>\n      <<<dimGrid, dimBlock>>>(x0_device, x_thread_device, xdot_thread_device, u_thread_device);\n  CudaCheckError();\n\n  // Copy the initial_state for actual and nominal\n  HANDLE_ERROR(\n      cudaMemcpy(x_thread_host_act.data(), x_thread_device, sizeof(float) * STATE_DIM, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(x_thread_host_nom.data(), x_thread_device + STATE_DIM, sizeof(float) * STATE_DIM,\n     
                     cudaMemcpyDeviceToHost));\n  // Copy the xdot for actual and nominal\n  HANDLE_ERROR(\n      cudaMemcpy(xdot_thread_host_act.data(), xdot_thread_device, sizeof(float) * STATE_DIM, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(xdot_thread_host_nom.data(), xdot_thread_device + STATE_DIM, sizeof(float) * STATE_DIM,\n                          cudaMemcpyDeviceToHost));\n  // copy the initial u for actual and nominal\n  HANDLE_ERROR(\n      cudaMemcpy(u_thread_host_act.data(), u_thread_device, sizeof(float) * CONTROL_DIM, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(u_thread_host_nom.data(), u_thread_device + CONTROL_DIM, sizeof(float) * CONTROL_DIM,\n                          cudaMemcpyDeviceToHost));\n\n  // Free the cuda memory that we allocated\n  cudaFree(x0_device);\n\n  cudaFree(x_thread_device);\n  cudaFree(xdot_thread_device);\n  cudaFree(u_thread_device);\n}\n\ntemplate <class COST_T>\n__global__ void computeAndSaveCostAllRollouts_KernelTest(COST_T* cost, int state_dim, int num_rollouts,\n                                                         float* running_costs, float* terminal_state,\n                                                         float* cost_rollout_device)\n{\n  int tid = blockDim.x * blockIdx.x + threadIdx.x;  // index on rollouts\n                                                    //    if (tid == 0) {\n  //        printf(\"Current state [%f, %f, %f, %f]\\n\", terminal_state[state_dim * tid],\n  //               terminal_state[state_dim * tid + 1], terminal_state[state_dim * tid + 2],\n  //               terminal_state[state_dim * tid + 3]);\n  //        printf(\"Current cost [%f]\\n\", running_costs[tid]);\n  //    }\n  mppi::kernels::computeAndSaveCost(num_rollouts, 2, tid, cost, &terminal_state[state_dim * tid], running_costs[tid],\n                                    nullptr, cost_rollout_device);\n  //    if (tid == 0) {\n  //        printf(\"Total cost [%f]\\n\", cost_rollout_device[tid]);\n  //    
}\n}\n\ntemplate <class COST_T, int STATE_DIM, int NUM_ROLLOUTS>\nvoid launchComputeAndSaveCostAllRollouts_KernelTest(COST_T& cost,\n                                                    const std::array<float, NUM_ROLLOUTS>& cost_all_rollouts,\n                                                    const std::array<float, STATE_DIM * NUM_ROLLOUTS>& terminal_states,\n                                                    std::array<float, NUM_ROLLOUTS>& cost_compute)\n{\n  const int BLOCKSIZE_X = 32;\n  const int BLOCKSIZE_Y = 8;\n\n  // Allocate CUDA memory\n  float* cost_all_rollouts_device;\n  float* terminal_states_device;\n  float* cost_compute_device;\n\n  HANDLE_ERROR(cudaMalloc((void**)&cost_all_rollouts_device, sizeof(float) * cost_all_rollouts.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&terminal_states_device, sizeof(float) * terminal_states.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&cost_compute_device, sizeof(float) * cost_compute.size()));\n\n  // Copy Host side data to the Device\n  HANDLE_ERROR(cudaMemcpy(cost_all_rollouts_device, cost_all_rollouts.data(), sizeof(float) * cost_all_rollouts.size(),\n                          cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(terminal_states_device, terminal_states.data(), sizeof(float) * terminal_states.size(),\n                          cudaMemcpyHostToDevice));\n\n  // Launch kernel; execution configuration order is grid first, then block\n  dim3 blocksize(BLOCKSIZE_X, 1);\n  dim3 gridsize((NUM_ROLLOUTS + (BLOCKSIZE_X - 1)) / BLOCKSIZE_X, 1);\n  computeAndSaveCostAllRollouts_KernelTest<<<gridsize, blocksize>>>(\n      cost.cost_d_, STATE_DIM, NUM_ROLLOUTS, cost_all_rollouts_device, terminal_states_device, cost_compute_device);\n  CudaCheckError();\n\n  // Copy Device side data to the host\n  HANDLE_ERROR(cudaMemcpy(cost_compute.data(), cost_compute_device, sizeof(float) * cost_compute.size(),\n                          cudaMemcpyDeviceToHost));\n\n  // free cuda Memory\n  cudaFree(cost_all_rollouts_device);\n  cudaFree(terminal_states_device);\n  
cudaFree(cost_compute_device);\n}\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T>\nvoid launchRolloutKernel_nom_act(DYN_T* dynamics, COST_T* costs, SAMPLER_T* sampler, float dt, const int num_timesteps,\n                                 const int num_rollouts, float lambda, float alpha, const std::vector<float>& x0,\n                                 const std::vector<float>& nom_control_seq, std::vector<float>& trajectory_costs_act,\n                                 std::vector<float>& trajectory_costs_nom, cudaStream_t stream)\n{\n  float* initial_state_d;\n  float* trajectory_costs_d;\n\n  const int BLOCKSIZE_X = 32;  // TODO we should change this parameter to be passed in to test multiple of them\n  const int BLOCKSIZE_Y = 8;\n\n  /**\n   * Ensure dynamics, costs, and sampler exist on GPU\n   */\n  dynamics->bindToStream(stream);\n  costs->bindToStream(stream);\n  sampler->bindToStream(stream);\n  // Call the GPU setup functions of the dynamics, costs, and sampler\n  dynamics->GPUSetup();\n  costs->GPUSetup();\n  sampler->GPUSetup();\n\n  sampler->setNumTimesteps(num_timesteps);\n  sampler->setNumRollouts(num_rollouts);\n  sampler->setNumDistributions(2);\n\n  // Create x init cuda array\n  HANDLE_ERROR(cudaMalloc((void**)&initial_state_d, sizeof(float) * DYN_T::STATE_DIM * 2));\n  // Create cost trajectory cuda array\n  HANDLE_ERROR(cudaMalloc((void**)&trajectory_costs_d, sizeof(float) * num_rollouts * 2));\n  // Create random noise generator\n  curandGenerator_t gen;\n  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);\n  curandSetStream(gen, stream);\n\n  /**\n   * Fill in GPU arrays\n   */\n  HANDLE_ERROR(\n      cudaMemcpyAsync(initial_state_d, x0.data(), sizeof(float) * DYN_T::STATE_DIM, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(initial_state_d + DYN_T::STATE_DIM, x0.data(), sizeof(float) * DYN_T::STATE_DIM,\n                               cudaMemcpyHostToDevice, stream));\n\n  
sampler->copyImportanceSamplerToDevice(nom_control_seq.data(), 0, false);\n  sampler->copyImportanceSamplerToDevice(nom_control_seq.data(), 1, false);\n  // Generate samples and do stream synchronize\n  sampler->generateSamples(1, 0, gen, true);\n  dim3 threadsPerBlock(BLOCKSIZE_X, BLOCKSIZE_Y, 2);\n  // Launch rollout kernel\n  mppi::kernels::launchRolloutKernel<DYN_T, COST_T, SAMPLER_T>(dynamics, costs, sampler, dt, num_timesteps,\n                                                               num_rollouts, lambda, alpha, initial_state_d,\n                                                               trajectory_costs_d, threadsPerBlock, stream, false);\n\n  // Copy the costs back to the host\n  HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_act.data(), trajectory_costs_d, num_rollouts * sizeof(float),\n                               cudaMemcpyDeviceToHost, stream));\n\n  HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_nom.data(), trajectory_costs_d + num_rollouts,\n                               num_rollouts * sizeof(float), cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  cudaFree(initial_state_d);\n  cudaFree(trajectory_costs_d);\n}\n\ntemplate <class DYNAMICS_T, class COSTS_T, int NUM_ROLLOUTS, int BLOCKSIZE_X, int BLOCKSIZE_Y>\n__global__ void autorallyRolloutKernel(int num_timesteps, float* state_d, float* U_d, float* du_d, float* nu_d,\n                                       float* costs_d, DYNAMICS_T* dynamics_model, COSTS_T* mppi_costs, int opt_delay,\n                                       float lambda, float alpha, float dt)\n{\n  int i, j;\n  int tdx = threadIdx.x;\n  int tdy = threadIdx.y;\n  int bdx = blockIdx.x;\n\n  // Initialize the local state, controls, and noise\n  float* s;\n  float* s_der;\n  float* u;\n  float* nu;\n  float* du;\n  int* crash;\n\n  const int STATE_DIM = DYNAMICS_T::STATE_DIM;\n  const int CONTROL_DIM = DYNAMICS_T::CONTROL_DIM;\n  const int SHARED_MEM_REQUEST_GRD = 
DYNAMICS_T::SHARED_MEM_REQUEST_GRD;\n  const int SHARED_MEM_REQUEST_BLK = DYNAMICS_T::SHARED_MEM_REQUEST_BLK;\n\n  // Create shared arrays for holding state and control data.\n  __shared__ float state_shared[BLOCKSIZE_X * STATE_DIM];\n  __shared__ float state_der_shared[BLOCKSIZE_X * STATE_DIM];\n  __shared__ float control_shared[BLOCKSIZE_X * CONTROL_DIM];\n  __shared__ float control_var_shared[BLOCKSIZE_X * CONTROL_DIM];\n  __shared__ float exploration_variance[BLOCKSIZE_X * CONTROL_DIM];\n  __shared__ int crash_status[BLOCKSIZE_X];\n  // Create a shared array for the dynamics model to use\n  __shared__ float theta[SHARED_MEM_REQUEST_GRD / sizeof(float) + 1 + SHARED_MEM_REQUEST_BLK * BLOCKSIZE_X];\n  __shared__ float theta_c[COSTS_T::SHARED_MEM_REQUEST_GRD + COSTS_T::SHARED_MEM_REQUEST_BLK * BLOCKSIZE_X];\n  __shared__ float y[DYNAMICS_T::OUTPUT_DIM];\n\n  // Initialize trajectory cost\n  float running_cost = 0;\n\n  int global_idx = BLOCKSIZE_X * bdx + tdx;\n  if (global_idx < NUM_ROLLOUTS)\n  {\n    // Portion of the shared array belonging to each x-thread index.\n    s = &state_shared[tdx * STATE_DIM];\n    s_der = &state_der_shared[tdx * STATE_DIM];\n    u = &control_shared[tdx * CONTROL_DIM];\n    du = &control_var_shared[tdx * CONTROL_DIM];\n    nu = &exploration_variance[tdx * CONTROL_DIM];\n    crash = &crash_status[tdx];\n    // Load the initial state, nu, and zero the noise\n    for (i = tdy; i < STATE_DIM; i += blockDim.y)\n    {\n      s[i] = state_d[i];\n      s_der[i] = 0;\n    }\n    // Load nu\n    for (i = tdy; i < CONTROL_DIM; i += blockDim.y)\n    {\n      u[i] = 0;\n      du[i] = 0;\n      nu[i] = nu_d[i];\n    }\n    crash[0] = 0;\n  }\n  __syncthreads();\n  /*<----Start of simulation loop-----> */\n  dynamics_model->initializeDynamics(s, u, y, theta, 0.0, dt);\n  mppi_costs->initializeCosts(s, u, theta_c, 0.0, dt);\n  for (i = 0; i < num_timesteps; i++)\n  {\n    if (global_idx < NUM_ROLLOUTS)\n    {\n      for (j = tdy; j < CONTROL_DIM; j 
+= blockDim.y)\n      {\n        // Noise free rollout\n        if (global_idx == 0 || i < opt_delay)\n        {  // Don't optimize variables that are already being executed\n          du[j] = 0.0;\n          u[j] = U_d[i * CONTROL_DIM + j];\n        }\n        else if (global_idx >= .99 * NUM_ROLLOUTS)\n        {\n          du[j] = du_d[CONTROL_DIM * num_timesteps * (BLOCKSIZE_X * bdx + tdx) + i * CONTROL_DIM + j] * nu[j];\n          u[j] = du[j];\n        }\n        else\n        {\n          du[j] = du_d[CONTROL_DIM * num_timesteps * (BLOCKSIZE_X * bdx + tdx) + i * CONTROL_DIM + j] * nu[j];\n          u[j] = U_d[i * CONTROL_DIM + j] + du[j];\n        }\n        du_d[CONTROL_DIM * num_timesteps * (BLOCKSIZE_X * bdx + tdx) + i * CONTROL_DIM + j] = u[j];\n      }\n    }\n    __syncthreads();\n    dynamics_model->enforceConstraints(s, &du_d[(global_idx * num_timesteps + i) * CONTROL_DIM]);\n    if (tdy == 0 && global_idx < NUM_ROLLOUTS)\n    {\n      dynamics_model->enforceConstraints(s, u);\n    }\n    __syncthreads();\n    // Compute the cost of the being in the current state\n    if (tdy == 0 && global_idx < NUM_ROLLOUTS && i > 0 && crash[0] > -1)\n    {\n      // Running average formula\n      running_cost +=\n          (mppi_costs->computeRunningCost(s, u, du, nu, lambda, alpha, i, theta_c, crash) - running_cost) / (1.0 * i);\n      //      printf(\"AutoRa Current State rollout %i, timestep: %i: [%f, %f, %f, %f]\\n\", global_idx, i, s[0], s[1],\n      //      s[2], s[3]); printf(\"AutoRa Running Cost rollout %i, timestep: %i: %f\\n\", global_idx, i, running_cost);\n    }\n    // Compute the dynamics\n    if (global_idx < NUM_ROLLOUTS)\n    {\n      dynamics_model->computeStateDeriv(s, u, s_der, theta);\n    }\n    __syncthreads();\n    // Update the state\n    if (global_idx < NUM_ROLLOUTS)\n    {\n      dynamics_model->updateState(s, s_der, dt);\n    }\n    //    //Check to see if the rollout will result in a (physical) crash.\n    //    if (tdy == 0 && 
global_idx < NUM_ROLLOUTS) {\n    //      mppi_costs.getCrash(s, crash);\n    //    }\n  }\n  /* <------- End of the simulation loop ----------> */\n  if (global_idx < NUM_ROLLOUTS && tdy == 0)\n  {  // Write cost results back to global memory.\n    costs_d[(BLOCKSIZE_X)*bdx + tdx] = running_cost + mppi_costs->terminalCost(s, theta_c);\n  }\n}\n\ntemplate <class DYNAMICS_T, class COSTS_T, int NUM_ROLLOUTS, int NUM_TIMESTEPS, int BLOCKSIZE_X, int BLOCKSIZE_Y>\nvoid launchAutorallyRolloutKernelTest(\n    DYNAMICS_T* dynamics, COSTS_T* costs, float dt, float lambda, float alpha,\n    std::array<float, DYNAMICS_T::STATE_DIM> state_array,\n    std::array<float, NUM_TIMESTEPS * DYNAMICS_T::CONTROL_DIM> control_array,\n    std::array<float, NUM_TIMESTEPS * NUM_ROLLOUTS * DYNAMICS_T::CONTROL_DIM> control_noise_array,\n    std::array<float, DYNAMICS_T::CONTROL_DIM> sigma_u, std::array<float, NUM_ROLLOUTS>& costs_out,\n    std::array<float, NUM_TIMESTEPS * NUM_ROLLOUTS * DYNAMICS_T::CONTROL_DIM>& control_noise_out, int opt_delay,\n    cudaStream_t stream)\n{\n  float* state_d;\n  float* U_d;\n  float* du_d;\n  float* nu_d;\n  float* costs_d;\n\n  // Allocate CUDA memory for the rollout\n  HANDLE_ERROR(cudaMalloc((void**)&state_d, sizeof(float) * state_array.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&U_d, sizeof(float) * control_array.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&du_d, sizeof(float) * control_noise_array.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&nu_d, sizeof(float) * sigma_u.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&costs_d, sizeof(float) * costs_out.size()));\n\n  // Copy the initial values\n  HANDLE_ERROR(\n      cudaMemcpyAsync(state_d, state_array.data(), sizeof(float) * state_array.size(), cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(\n      cudaMemcpyAsync(U_d, control_array.data(), sizeof(float) * control_array.size(), cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(du_d, control_noise_array.data(), sizeof(float) * 
control_noise_array.size(),\n                               cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(nu_d, sigma_u.data(), sizeof(float) * sigma_u.size(), cudaMemcpyHostToDevice, stream));\n\n  const int GRIDSIZE_X = (NUM_ROLLOUTS - 1) / BLOCKSIZE_X + 1;\n\n  dim3 dimBlock(BLOCKSIZE_X, BLOCKSIZE_Y, 1);\n  dim3 dimGrid(GRIDSIZE_X, 1, 1);\n\n  autorallyRolloutKernel<DYNAMICS_T, COSTS_T, NUM_ROLLOUTS, BLOCKSIZE_X, BLOCKSIZE_Y>\n      <<<dimGrid, dimBlock, 0, stream>>>(NUM_TIMESTEPS, state_d, U_d, du_d, nu_d, costs_d, dynamics->model_d_,\n                                         costs->cost_d_, opt_delay, lambda, alpha, dt);\n\n  CudaCheckError();\n\n  // Copy data back\n  HANDLE_ERROR(\n      cudaMemcpyAsync(costs_out.data(), costs_d, sizeof(float) * costs_out.size(), cudaMemcpyDeviceToHost, stream));\n\n  // Copy the noise back\n  HANDLE_ERROR(cudaMemcpyAsync(control_noise_out.data(), du_d, sizeof(float) * control_noise_out.size(),\n                               cudaMemcpyDeviceToHost, stream));\n\n  // Deallocate CUDA Memory\n  HANDLE_ERROR(cudaFree(state_d));\n  HANDLE_ERROR(cudaFree(U_d));\n  HANDLE_ERROR(cudaFree(du_d));\n  HANDLE_ERROR(cudaFree(nu_d));\n  HANDLE_ERROR(cudaFree(costs_d));\n}\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T>\nvoid launchCPURolloutKernel(DYN_T* model, COST_T* cost, SAMPLER_T* sampler, float dt, const int num_timesteps,\n                            const int num_rollouts, float lambda, float alpha,\n                            const Eigen::Ref<const typename DYN_T::state_array>& x0,\n                            Eigen::Ref<Eigen::MatrixXf> trajectory_costs, cudaStream_t stream)\n{\n  using state_array = typename DYN_T::state_array;\n  using output_array = typename DYN_T::output_array;\n  using control_array = typename DYN_T::control_array;\n\n  Eigen::MatrixXf control_noise = Eigen::MatrixXf::Zero(DYN_T::CONTROL_DIM, num_rollouts * num_timesteps);\n  Eigen::MatrixXi crash_status = 
Eigen::MatrixXi::Zero(num_rollouts, 1);\n  HANDLE_ERROR(cudaMemcpy(control_noise.data(), sampler->getControlSample(0, 0, 0),\n                          sizeof(float) * DYN_T::CONTROL_DIM * num_rollouts * num_timesteps, cudaMemcpyDeviceToHost));\n\n  state_array curr_x, next_x, x_der;\n  control_array u;\n  output_array y;\n  for (int sample_idx = 0; sample_idx < num_rollouts; sample_idx++)\n  {\n    curr_x = x0;\n    model->initializeDynamics(curr_x, u, y, 0, dt);\n    cost->initializeCosts(y, u, 0, dt);\n    float& running_cost = trajectory_costs(sample_idx, 0);\n    running_cost = 0.0f;\n    for (int t = 0; t < num_timesteps; t++)\n    {\n      u = control_noise.col(t + num_timesteps * sample_idx);\n      model->enforceConstraints(curr_x, u);\n      model->step(curr_x, next_x, x_der, u, y, t, dt);\n      running_cost += cost->computeRunningCost(y, u, t, &crash_status(sample_idx));\n      running_cost += sampler->computeLikelihoodRatioCost(u, t, 0, lambda, alpha);\n      curr_x = next_x;\n    }\n    running_cost += cost->terminalCost(y);\n    running_cost /= num_timesteps;\n  }\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/core/rollout_kernel_test.cuh",
    "content": "#pragma once\n\n#ifndef KERNEL_TESTS_MPPI_CORE_MPPI_CORE_KERNEL_TEST_CUH_\n#define KERNEL_TESTS_MPPI_CORE_MPPI_CORE_KERNEL_TEST_CUH_\n\n#include <mppi/core/mppi_common.cuh>\n#include <curand.h>\n#include <vector>\n#include <array>\n\n// Declare some sizes for the kernel parameters\n\ntemplate <int BLOCKSIZE_Z = 1>\n__global__ void loadGlobalToShared_KernelTest(float* x0_device, float* x_thread, float* xdot_thread, float* u_thread);\n\nvoid launchGlobalToShared_KernelTest(const std::vector<float>& x0_host, std::vector<float>& x_thread_host,\n                                     std::vector<float>& xdot_thread_host, std::vector<float>& u_thread_host);\n\nvoid launchGlobalToShared_KernelTest_nom_act(\n    const std::vector<float>& x0_host_act, std::vector<float>& x_thread_host_act,\n    std::vector<float>& xdot_thread_host_act, std::vector<float>& u_thread_host_act,\n    const std::vector<float>& x0_host_nom, std::vector<float>& x_thread_host_nom,\n    std::vector<float>& xdot_thread_host_nom, std::vector<float>& u_thread_host_nom);\n\ntemplate <class DYN_T, class COST_T, class SAMPLER_T>\nvoid launchRolloutKernel_nom_act(DYN_T* dynamics, COST_T* costs, SAMPLER_T* sampler, float dt, const int num_timesteps,\n                                 const int num_rollouts, float lambda, float alpha, const std::vector<float>& x0,\n                                 const std::vector<float>& nom_control_seq, std::vector<float>& trajectory_costs_act,\n                                 std::vector<float>& trajectory_costs_nom, cudaStream_t stream = 0);\n\ntemplate <class COST_T>\n__global__ void computeAndSaveCostAllRollouts_KernelTest(COST_T* cost, int state_dim, int num_rollouts,\n                                                         float* running_costs, float* terminal_state,\n                                                         float* cost_rollout_device);\n\ntemplate <class COST_T, int STATE_DIM, int NUM_ROLLOUTS>\nvoid 
launchComputeAndSaveCostAllRollouts_KernelTest(COST_T& cost,\n                                                    const std::array<float, NUM_ROLLOUTS>& cost_all_rollouts,\n                                                    const std::array<float, STATE_DIM * NUM_ROLLOUTS>& terminal_states,\n                                                    std::array<float, NUM_ROLLOUTS>& cost_compute);\n\ntemplate <class DYNAMICS_T, class COSTS_T, int NUM_ROLLOUTS, int BLOCKSIZE_X, int BLOCKSIZE_Y>\n__global__ void autorallyRolloutKernel(int num_timesteps, float* state_d, float* U_d, float* du_d, float* nu_d,\n                                       float* costs_d, DYNAMICS_T* dynamics_model, COSTS_T* mppi_costs, int opt_delay,\n                                       float lambda, float alpha, float dt);\n\ntemplate <class DYNAMICS_T, class COSTS_T, int NUM_ROLLOUTS, int NUM_TIMESTEPS, int BLOCKSIZE_X, int BLOCKSIZE_Y>\nvoid launchAutorallyRolloutKernelTest(\n    DYNAMICS_T* dynamics, COSTS_T* costs, float dt, float lambda, float alpha,\n    std::array<float, DYNAMICS_T::STATE_DIM> state_array,\n    std::array<float, NUM_TIMESTEPS * DYNAMICS_T::CONTROL_DIM> control_array,\n    std::array<float, NUM_TIMESTEPS * NUM_ROLLOUTS * DYNAMICS_T::CONTROL_DIM> control_noise_array,\n    std::array<float, DYNAMICS_T::CONTROL_DIM> sigma_u, std::array<float, NUM_ROLLOUTS>& costs_out,\n    std::array<float, NUM_TIMESTEPS * NUM_ROLLOUTS * DYNAMICS_T::CONTROL_DIM>& control_noise_out, int opt_delay,\n    cudaStream_t stream);\n\n#if __CUDACC__\n#include \"rollout_kernel_test.cu\"\n#endif\n\n#endif  // !KERNEL_TESTS_MPPI_CORE_MPPI_CORE_KERNEL_TEST_CUH_\n"
  },
  {
    "path": "tests/include/kernel_tests/core/weightedreduction_kernel_test.cu",
    "content": "#include \"weightedreduction_kernel_test.cuh\"\n\n__global__ void setInitialControlToZero_KernelTest(int control_dim, float* u_d, float* u_intermediate)\n{\n  int thread_idx = blockDim.x * blockIdx.x + threadIdx.x;\n  mppi::kernels::setInitialControlToZero(control_dim, thread_idx, u_d, u_intermediate);\n}\n\ntemplate <int num_threads, int control_dim>\nvoid launchSetInitialControlToZero_KernelTest(std::array<float, control_dim>& u_host,\n                                              std::array<float, num_threads * control_dim>& u_intermediate_host)\n{\n  float* u_dev;\n  float* u_intermediate_dev;\n\n  // Allocate Memory\n  HANDLE_ERROR(cudaMalloc((void**)&u_dev, sizeof(float) * u_host.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&u_intermediate_dev, sizeof(float) * u_intermediate_host.size()))\n\n  setInitialControlToZero_KernelTest<<<1, num_threads>>>(control_dim, u_dev, u_intermediate_dev);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(u_host.data(), u_dev, sizeof(float) * u_host.size(), cudaMemcpyDeviceToHost))\n  HANDLE_ERROR(cudaMemcpy(u_intermediate_host.data(), u_intermediate_dev, sizeof(float) * u_intermediate_host.size(),\n                          cudaMemcpyDeviceToHost))\n\n  cudaFree(u_dev);\n  cudaFree(u_intermediate_dev);\n}\n\ntemplate <int control_dim>\n__global__ void strideControlWeightReduction_KernelTest(int num_rollouts, int num_timesteps, int sum_stride,\n                                                        float* exp_costs_d, float normalizer, float* du_d,\n                                                        float* u_intermediate)\n{\n  int thread_idx = threadIdx.x;\n  int block_idx = blockIdx.x;\n\n  float u_thread[control_dim];\n  float* u_intermediate_thread = &u_intermediate[block_idx * control_dim * ((num_rollouts - 1) / sum_stride + 1)];\n\n  mppi::kernels::strideControlWeightReduction(num_rollouts, num_timesteps, sum_stride, thread_idx, block_idx,\n                                              control_dim, 
exp_costs_d, normalizer, du_d, u_thread,\n                                              u_intermediate_thread);\n}\n\ntemplate <int control_dim, int num_rollouts, int num_timesteps, int sum_stride>\nvoid launchStrideControlWeightReduction_KernelTest(\n    float normalizer, const std::array<float, num_rollouts>& exp_costs_host,\n    const std::array<float, num_rollouts * num_timesteps * control_dim>& du_host,\n    std::array<float, num_timesteps * control_dim*((num_rollouts - 1) / sum_stride + 1)>& u_intermediate_host)\n{\n  float* exp_costs_dev;\n  float* du_dev;\n  float* u_intermediate_dev;\n\n  // Allocate Memory\n  HANDLE_ERROR(cudaMalloc((void**)&exp_costs_dev, sizeof(float) * exp_costs_host.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&du_dev, sizeof(float) * du_host.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&u_intermediate_dev, sizeof(float) * u_intermediate_host.size()));\n\n  HANDLE_ERROR(\n      cudaMemcpy(exp_costs_dev, exp_costs_host.data(), sizeof(float) * exp_costs_host.size(), cudaMemcpyHostToDevice))\n  HANDLE_ERROR(cudaMemcpy(du_dev, du_host.data(), sizeof(float) * du_host.size(), cudaMemcpyHostToDevice))\n\n  dim3 blockdim((num_rollouts - 1) / sum_stride + 1, 1, 1);\n  dim3 griddim(num_timesteps, 1, 1);\n\n  strideControlWeightReduction_KernelTest<control_dim><<<griddim, blockdim>>>(\n      num_rollouts, num_timesteps, sum_stride, exp_costs_dev, normalizer, du_dev, u_intermediate_dev);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(u_intermediate_host.data(), u_intermediate_dev, sizeof(float) * u_intermediate_host.size(),\n                          cudaMemcpyDeviceToHost))\n\n  cudaFree(exp_costs_dev);\n  cudaFree(du_dev);\n  cudaFree(u_intermediate_dev);\n}\n\ntemplate <int control_dim>\n__global__ void rolloutWeightReductionAndSaveControl_KernelTest(int num_rollouts, int num_timesteps, int sum_stride,\n                                                                float* u_intermediate, float* du_new_d)\n{\n  int thread_idx = threadIdx.x;  
// Current cell\n  int block_idx = blockIdx.x;    // Current timestep\n\n  float u[control_dim];\n  float* u_intermediate_thread = &u_intermediate[block_idx * control_dim * ((num_rollouts - 1) / sum_stride + 1)];\n  mppi::kernels::rolloutWeightReductionAndSaveControl(thread_idx, block_idx, num_rollouts, num_timesteps, control_dim,\n                                                      sum_stride, u, u_intermediate_thread, du_new_d);\n}\n\ntemplate <int control_dim, int num_rollouts, int num_timesteps, int sum_stride>\nvoid launchRolloutWeightReductionAndSaveControl_KernelTest(\n    const std::array<float, num_timesteps * control_dim*((num_rollouts - 1) / sum_stride + 1)>& u_intermediate_host,\n    std::array<float, num_timesteps * control_dim>& du_new_host)\n{\n  float* u_intermediate_dev;\n  float* du_new_dev;\n\n  // Allocate Memory\n  HANDLE_ERROR(cudaMalloc((void**)&u_intermediate_dev, sizeof(float) * u_intermediate_host.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&du_new_dev, sizeof(float) * du_new_host.size()))\n\n  HANDLE_ERROR(cudaMemcpy(u_intermediate_dev, u_intermediate_host.data(), sizeof(float) * u_intermediate_host.size(),\n                          cudaMemcpyHostToDevice))\n  dim3 blockdim((num_rollouts - 1) / sum_stride + 1, 1, 1);\n  dim3 griddim(num_timesteps, 1, 1);\n\n  rolloutWeightReductionAndSaveControl_KernelTest<control_dim>\n      <<<griddim, blockdim>>>(num_rollouts, num_timesteps, sum_stride, u_intermediate_dev, du_new_dev);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(du_new_host.data(), du_new_dev, sizeof(float) * du_new_host.size(), cudaMemcpyDeviceToHost))\n\n  cudaFree(u_intermediate_dev);\n  cudaFree(du_new_dev);\n}\n\n// Old Weighted Reduction Kernel Implementation for comparison\ntemplate <int CONTROL_DIM, int NUM_ROLLOUTS, int BLOCKSIZE_WRX>\n__global__ void autorallyWeightedReductionKernel(float* states_d, float* du_d, float normalizer, int num_timesteps)\n{\n  int tdx = threadIdx.x;\n  int bdx = blockIdx.x;\n\n  __shared__ 
float u_system[CONTROL_DIM * ((NUM_ROLLOUTS - 1) / BLOCKSIZE_WRX + 1)];\n  int stride = BLOCKSIZE_WRX;\n\n  float u[CONTROL_DIM];\n\n  int i, j;\n  for (i = 0; i < CONTROL_DIM; i++)\n  {\n    u[i] = 0;\n  }\n\n  for (j = 0; j < CONTROL_DIM; j++)\n  {\n    u_system[tdx * CONTROL_DIM + j] = 0;\n  }\n  __syncthreads();\n\n  if (BLOCKSIZE_WRX * tdx < NUM_ROLLOUTS)\n  {\n    float weight = 0;\n    for (i = 0; i < stride; i++)\n    {\n      if (stride * tdx + i < NUM_ROLLOUTS)\n      {\n        weight = states_d[stride * tdx + i] / normalizer;\n        for (j = 0; j < CONTROL_DIM; j++)\n        {\n          u[j] = du_d[(stride * tdx + i) * (num_timesteps * CONTROL_DIM) + bdx * CONTROL_DIM + j];\n          u_system[tdx * CONTROL_DIM + j] += weight * u[j];\n        }\n      }\n    }\n  }\n  __syncthreads();\n  if (tdx == 0 && bdx < num_timesteps)\n  {\n    for (i = 0; i < CONTROL_DIM; i++)\n    {\n      u[i] = 0;\n    }\n    for (i = 0; i < (NUM_ROLLOUTS - 1) / BLOCKSIZE_WRX + 1; i++)\n    {\n      for (j = 0; j < CONTROL_DIM; j++)\n      {\n        u[j] += u_system[CONTROL_DIM * i + j];\n      }\n    }\n    for (i = 0; i < CONTROL_DIM; i++)\n    {\n      du_d[CONTROL_DIM * bdx + i] = u[i];\n    }\n  }\n}\n\ntemplate <int CONTROL_DIM, int NUM_ROLLOUTS, int BLOCKSIZE_WRX, int NUM_TIMESTEPS>\nvoid launchAutoRallyWeightedReductionKernelTest(\n    std::array<float, NUM_ROLLOUTS> exp_costs,\n    std::array<float, CONTROL_DIM * NUM_ROLLOUTS * NUM_TIMESTEPS> perturbed_controls, float normalizer,\n    std::array<float, CONTROL_DIM * NUM_TIMESTEPS>& controls_out, cudaStream_t stream)\n{\n  float* exp_costs_d_;\n  float* perturbed_controls_d_;\n\n  // Allocate CUDA memory for the device pointers\n  HANDLE_ERROR(cudaMalloc((void**)&exp_costs_d_, sizeof(float) * exp_costs.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&perturbed_controls_d_, sizeof(float) * perturbed_controls.size()));\n\n  // Memcpy the data to the device\n  HANDLE_ERROR(cudaMemcpyAsync(exp_costs_d_, exp_costs.data(), 
sizeof(float) * exp_costs.size(), cudaMemcpyHostToDevice,\n                               stream));\n  HANDLE_ERROR(cudaMemcpyAsync(perturbed_controls_d_, perturbed_controls.data(),\n                               sizeof(float) * perturbed_controls.size(), cudaMemcpyHostToDevice, stream));\n\n  // Setup the block and grid sizes\n  dim3 dimBlock((NUM_ROLLOUTS - 1) / BLOCKSIZE_WRX + 1, 1, 1);\n  dim3 dimGrid(NUM_TIMESTEPS, 1, 1);\n\n  // Kernel launch and error check\n  autorallyWeightedReductionKernel<CONTROL_DIM, NUM_ROLLOUTS, BLOCKSIZE_WRX>\n      <<<dimGrid, dimBlock, 0, stream>>>(exp_costs_d_, perturbed_controls_d_, normalizer, NUM_TIMESTEPS);\n  CudaCheckError();\n\n  // Copy the result back to the GPU\n  HANDLE_ERROR(cudaMemcpyAsync(controls_out.data(), perturbed_controls_d_, sizeof(float) * controls_out.size(),\n                               cudaMemcpyDeviceToHost, stream));\n\n  // Free the CUDA memory\n  HANDLE_ERROR(cudaFree(exp_costs_d_));\n  HANDLE_ERROR(cudaFree(perturbed_controls_d_));\n}\n\ntemplate <int CONTROL_DIM, int NUM_ROLLOUTS, int SUM_STRIDE, int NUM_TIMESTEPS>\nvoid launchWeightedReductionKernelTest(std::array<float, NUM_ROLLOUTS> exp_costs,\n                                       std::array<float, CONTROL_DIM * NUM_ROLLOUTS * NUM_TIMESTEPS> perturbed_controls,\n                                       float normalizer, std::array<float, CONTROL_DIM * NUM_TIMESTEPS>& controls_out,\n                                       cudaStream_t stream)\n{\n  float* exp_costs_d_;\n  float* perturbed_controls_d_;\n  float* optimal_controls_d_;\n\n  // Allocate CUDA memory for the device pointers\n  HANDLE_ERROR(cudaMalloc((void**)&exp_costs_d_, sizeof(float) * exp_costs.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&perturbed_controls_d_, sizeof(float) * perturbed_controls.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&optimal_controls_d_, sizeof(float) * controls_out.size()));\n\n  // Memcpy the data to the device\n  
HANDLE_ERROR(cudaMemcpyAsync(exp_costs_d_, exp_costs.data(), sizeof(float) * exp_costs.size(), cudaMemcpyHostToDevice,\n                               stream));\n  HANDLE_ERROR(cudaMemcpyAsync(perturbed_controls_d_, perturbed_controls.data(),\n                               sizeof(float) * perturbed_controls.size(), cudaMemcpyHostToDevice, stream));\n\n  // Setup the block and grid sizes\n  dim3 dimBlock((NUM_ROLLOUTS - 1) / SUM_STRIDE + 1, 1, 1);\n  dim3 dimGrid(NUM_TIMESTEPS, 1, 1);\n\n  mppi::kernels::weightedReductionKernel<CONTROL_DIM><<<dimGrid, dimBlock, 0, stream>>>(\n      exp_costs_d_, perturbed_controls_d_, optimal_controls_d_, normalizer, NUM_TIMESTEPS, NUM_ROLLOUTS, SUM_STRIDE);\n  CudaCheckError();\n\n  // Copy the result back to the GPU\n  HANDLE_ERROR(cudaMemcpyAsync(controls_out.data(), optimal_controls_d_, sizeof(float) * controls_out.size(),\n                               cudaMemcpyDeviceToHost, stream));\n\n  // Free the CUDA memory\n  HANDLE_ERROR(cudaFree(exp_costs_d_));\n  HANDLE_ERROR(cudaFree(perturbed_controls_d_));\n  HANDLE_ERROR(cudaFree(optimal_controls_d_));\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/core/weightedreduction_kernel_test.cuh",
    "content": "#pragma once\n\n#ifndef MPPI_GENERIC_KERNEL_TESTS_MPPI_CORE_WEIGHTEDREDUCTION_KERNEL_TEST_CUH_\n#define MPPI_GENERIC_KERNEL_TESTS_MPPI_CORE_WEIGHTEDREDUCTION_KERNEL_TEST_CUH_\n\n#include <mppi/core/mppi_common.cuh>\n#include <array>\n\n__global__ void setInitialControlToZero_KernelTest(int control_dim, float* u_d, float* u_intermediate);\n\ntemplate <int num_threads, int control_dim>\nvoid launchSetInitialControlToZero_KernelTest(std::array<float, control_dim>& u_host,\n                                              std::array<float, num_threads * control_dim>& u_intermediate_host);\n\ntemplate <int CONTROL_DIM>\n__global__ void strideControlWeightReduction_KernelTest(int num_rollouts, int num_timesteps, int sum_stride,\n                                                        float* exp_costs_d, float normalizer, float* du_d,\n                                                        float* u_intermediate);\n\ntemplate <int control_dim, int num_rollouts, int num_timesteps, int sum_stride>\nvoid launchStrideControlWeightReduction_KernelTest(\n    float normalizer, const std::array<float, num_rollouts>& exp_costs_host,\n    const std::array<float, num_rollouts * num_timesteps * control_dim>& du_host,\n    std::array<float, num_timesteps * control_dim*((num_rollouts - 1) / sum_stride + 1)>& u_intermediate_host);\n\ntemplate <int control_dim>\n__global__ void rolloutWeightReductionAndSaveControl_KernelTest(int num_rollouts, int num_timesteps, int sum_stride,\n                                                                float* u_intermediate, float* du_new_d);\n\ntemplate <int control_dim, int num_rollouts, int num_timesteps, int sum_stride>\nvoid launchRolloutWeightReductionAndSaveControl_KernelTest(\n    const std::array<float, num_timesteps * control_dim*((num_rollouts - 1) / sum_stride + 1)>& u_intermediate_host,\n    std::array<float, num_timesteps * control_dim>& du_new_host);\n\ntemplate <int CONTROL_DIM, int NUM_ROLLOUTS, int 
BLOCKSIZE_WRX>\n__global__ void autorallyWeightedReductionKernel(float* states_d, float* du_d, float normalizer, int num_timesteps);\n\ntemplate <int CONTROL_DIM, int NUM_ROLLOUTS, int BLOCKSIZE_WRX, int NUM_TIMESTEPS>\nvoid launchAutoRallyWeightedReductionKernelTest(\n    std::array<float, NUM_ROLLOUTS> exp_costs,\n    std::array<float, CONTROL_DIM * NUM_ROLLOUTS * NUM_TIMESTEPS> perturbed_controls, float normalizer,\n    std::array<float, CONTROL_DIM * NUM_TIMESTEPS>& controls_out, cudaStream_t stream);\n\ntemplate <int CONTROL_DIM, int NUM_ROLLOUTS, int SUM_STRIDE, int NUM_TIMESTEPS>\nvoid launchWeightedReductionKernelTest(std::array<float, NUM_ROLLOUTS> exp_costs,\n                                       std::array<float, CONTROL_DIM * NUM_ROLLOUTS * NUM_TIMESTEPS> perturbed_controls,\n                                       float normalizer, std::array<float, CONTROL_DIM * NUM_TIMESTEPS>& controls_out,\n                                       cudaStream_t stream);\n\n#if __CUDACC__\n#include \"weightedreduction_kernel_test.cu\"\n#endif\n\n#endif  //! MPPI_GENERIC_KERNEL_TESTS_MPPI_CORE_WEIGHTEDREDUCTION_KERNEL_TEST_CUH_"
  },
  {
    "path": "tests/include/kernel_tests/cost_functions/autorally/ar_robust_cost_kernel_test.cu",
    "content": "//\n// Created by jgibson37 on 2/7/20.\n//\n\ntemplate <class CLASS_T>\n__global__ void getCostmapCostTestKernel(CLASS_T* cost, float* test_xu, float* cost_results, int num_points)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num_points)\n  {\n    float* state = &test_xu[tid];\n    cost_results[tid] = cost->getCostmapCost(state);\n  }\n}\n\ntemplate <class CLASS_T>\nvoid launchGetCostmapCostTestKernel(CLASS_T& cost, std::vector<std::array<float, 9>>& test_xu,\n                                    std::vector<float>& cost_results)\n{\n  int num_test_points = test_xu.size();\n  cost_results.resize(num_test_points * 9);\n\n  float* cost_results_d;\n  float* test_xu_d;\n  HANDLE_ERROR(cudaMalloc((void**)&cost_results_d, sizeof(float) * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&test_xu_d, sizeof(float) * 9 * num_test_points))\n\n  for (int i = 0; i < num_test_points; i++)\n  {\n    for (int j = 0; j < 9; j++)\n    {\n      cost_results[9 * i + j] = test_xu[i][j];\n    }\n  }\n\n  HANDLE_ERROR(cudaMemcpy(test_xu_d, test_xu.data(), sizeof(float) * 9 * num_test_points, cudaMemcpyHostToDevice));\n\n  // TODO amount should depend on the number of query points\n  dim3 threadsPerBlock(num_test_points, 1);\n  dim3 numBlocks(1, 1);\n  getCostmapCostTestKernel<CLASS_T>\n      <<<numBlocks, threadsPerBlock>>>(static_cast<CLASS_T*>(cost.cost_d_), test_xu_d, cost_results_d, num_test_points);\n  CudaCheckError();\n  cudaDeviceSynchronize();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(\n      cudaMemcpy(cost_results.data(), cost_results_d, sizeof(float) * num_test_points, cudaMemcpyDeviceToHost));\n\n  cudaDeviceSynchronize();\n\n  cudaFree(cost_results_d);\n  cudaFree(test_xu_d);\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/cost_functions/autorally/ar_robust_cost_kernel_test.cuh",
    "content": "//\n// Created by jgibson37 on 2/7/20.\n//\n\n#ifndef MPPIGENERIC_AR_ROBUST_COST_KERNEL_TEST_CUH\n#define MPPIGENERIC_AR_ROBUST_COST_KERNEL_TEST_CUH\n\n#include \"ar_standard_cost_kernel_test.cuh\"\n\ntemplate <class CLASS_T>\n__global__ void getCostmapCostTestKernel(CLASS_T* cost, float* test_xu, float* cost_results, int num_points);\n\ntemplate <class CLASS_T>\nvoid launchGetCostmapCostTestKernel(CLASS_T& cost, std::vector<std::array<float, 9>>& test_xu,\n                                    std::vector<float>& cost_results);\n\n#include \"ar_robust_cost_kernel_test.cu\"\n\n#endif  // MPPIGENERIC_AR_ROBUST_COST_KERNEL_TEST_CUH\n"
  },
  {
    "path": "tests/include/kernel_tests/cost_functions/autorally/ar_standard_cost_kernel_test.cu",
    "content": "#include <mppi/core/mppi_common.cuh>\n\ntemplate <typename COST_T, typename PARAMS_T>\n__global__ void parameterTestKernel(const COST_T* cost, PARAMS_T& params, int& width, int& height)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  // printf(\"\\nEntering the kernel!\\n\");\n  // printf(\"The thread id is: %i\\n\", tid);\n  if (tid == 0)\n  {\n    // printf(\"\")\n    params = cost->getParams();\n    width = cost->getWidth();\n    height = cost->getHeight();\n  }\n}\n\ntemplate <typename COST_T, typename PARAMS_T>\nvoid launchParameterTestKernel(const COST_T& cost, PARAMS_T& params, int& width, int& height)\n{\n  PARAMS_T* params_d;\n  int* width_d;\n  int* height_d;\n  HANDLE_ERROR(cudaMalloc((void**)&params_d, sizeof(PARAMS_T)))\n  HANDLE_ERROR(cudaMalloc((void**)&width_d, sizeof(float)))\n  HANDLE_ERROR(cudaMalloc((void**)&height_d, sizeof(float)))\n\n  parameterTestKernel<COST_T, PARAMS_T><<<1, 1>>>(static_cast<COST_T*>(cost.cost_d_), *params_d, *width_d, *height_d);\n  CudaCheckError();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(&params, params_d, sizeof(PARAMS_T), cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(&width, width_d, sizeof(float), cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(&height, height_d, sizeof(float), cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(params_d);\n}\n\n// TODO actually check texture\n__global__ void checkCudaArrayKernel(float4* result_arr, cudaArray* array, int number)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  // printf(\"\\nEntering the kernel!\\n\");\n  // printf(\"The thread id is: %i\\n\", tid);\n  if (tid < number)\n  {\n    // printf(\"The thread id is: %i\\n\", tid);\n    result_arr[tid].x = 0.0;\n    result_arr[tid].y = 0.0;\n    result_arr[tid].z = 0.0;\n    result_arr[tid].w = 0.0;\n    // result_arr[tid] = array[tid];\n    // printf(array[tid]);\n  }\n}\n\nvoid launchCheckCudaArray(std::vector<float4>& result_arr, 
cudaArray* array, int number)\n{\n  float4* results_d;\n  HANDLE_ERROR(cudaMalloc((void**)&results_d, sizeof(float4) * number));\n\n  result_arr.resize(number);\n\n  dim3 threadsPerBlock(4, 1);\n  dim3 numBlocks(1, 1);\n  checkCudaArrayKernel<<<numBlocks, threadsPerBlock>>>(results_d, array, number);\n  CudaCheckError();\n  cudaDeviceSynchronize();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(result_arr.data(), results_d, sizeof(float4) * number, cudaMemcpyDeviceToHost));\n\n  cudaFree(results_d);\n}\n\ntemplate <typename COST_T>\n__global__ void transformTestKernel(float3* results, COST_T* cost)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  // printf(\"\\nEntering the kernel!\\n\");\n  // printf(\"The thread id is: %i\\n\", tid);\n  if (tid == 0)\n  {\n    // printf(\"\")\n    results[0] = cost->getParams().r_c1;\n    results[1] = cost->getParams().r_c2;\n    results[2] = cost->getParams().trs;\n  }\n}\n\ntemplate <typename COST_T>\nvoid launchTransformTestKernel(std::vector<float3>& result, const COST_T& cost)\n{\n  result.resize(3);\n\n  // Allocate memory on the CPU for checking the mass\n  float3* results_d;\n  HANDLE_ERROR(cudaMalloc((void**)&results_d, sizeof(float3) * 3))\n\n  transformTestKernel<<<1, 1>>>(results_d, cost.cost_d_);\n  CudaCheckError();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(result.data(), results_d, sizeof(float3) * 3, cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(results_d);\n}\n\ntemplate <typename COST_T>\n__global__ void textureTestKernel(const COST_T& cost, float4* test_results, float2* test_indexes, int num_points)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  // printf(\"\\nEntering the kernel!\\n\");\n  // printf(\"The thread id is: %i\\n\", tid);\n  if (tid < num_points)\n  {\n    // printf(\"thread id: %i went to check texture at index %i, %i\\n\", tid, test_indexes[tid].x, test_indexes[tid].y);\n\n    // query texture\n    float4 
track_params_back = cost.queryTexture(test_indexes[tid].x, test_indexes[tid].y);\n    // put result in array\n    // printf(\"thread id: %i got texture point (%f, %f, %f, %f)\\n\", tid, track_params_back.x, track_params_back.y,\n    // track_params_back.z, track_params_back.w);\n    test_results[tid] = track_params_back;\n    // test_results[tid].x = 1;\n  }\n}\n\ntemplate <typename COST_T>\nvoid launchTextureTestKernel(const COST_T& cost, std::vector<float4>& test_results, std::vector<float2>& test_indexes)\n{\n  int num_test_points = test_indexes.size();\n  test_results.resize(num_test_points);\n\n  float4* tex_results_d;\n  float2* tex_test_indexes_d;\n  HANDLE_ERROR(cudaMalloc((void**)&tex_results_d, sizeof(float4) * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&tex_test_indexes_d, sizeof(float2) * num_test_points))\n\n  HANDLE_ERROR(\n      cudaMemcpy(tex_test_indexes_d, test_indexes.data(), sizeof(float2) * num_test_points, cudaMemcpyHostToDevice));\n\n  // TODO amount should depend on the number of query points\n  dim3 threadsPerBlock(num_test_points, 1);\n  dim3 numBlocks(1, 1);\n  textureTestKernel<<<numBlocks, threadsPerBlock>>>(*cost.cost_d_, tex_results_d, tex_test_indexes_d, num_test_points);\n  CudaCheckError();\n  cudaDeviceSynchronize();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(\n      cudaMemcpy(test_results.data(), tex_results_d, sizeof(float4) * num_test_points, cudaMemcpyDeviceToHost));\n\n  cudaDeviceSynchronize();\n\n  cudaFree(tex_results_d);\n  cudaFree(tex_test_indexes_d);\n}\n\ntemplate <typename COST_T>\n__global__ void textureTransformTestKernel(COST_T& cost, float4* test_results, float2* test_indexes, int num_points)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  // printf(\"\\nEntering the kernel!\\n\");\n  // printf(\"The thread id is: %i\\n\", tid);\n  if (tid < num_points)\n  {\n    // query texture\n    float4 track_params_back = cost.queryTextureTransformed(test_indexes[tid].x, 
test_indexes[tid].y);\n    // put result in array\n    test_results[tid] = track_params_back;\n    // test_results[tid].x = 1;\n  }\n}\n\ntemplate <typename COST_T>\nvoid launchTextureTransformTestKernel(const COST_T& cost, std::vector<float4>& test_results,\n                                      std::vector<float2>& test_indexes)\n{\n  int num_test_points = test_indexes.size();\n  test_results.resize(num_test_points);\n\n  float4* tex_results_d;\n  float2* tex_test_indexes_d;\n  HANDLE_ERROR(cudaMalloc((void**)&tex_results_d, sizeof(float4) * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&tex_test_indexes_d, sizeof(float2) * num_test_points))\n\n  HANDLE_ERROR(\n      cudaMemcpy(tex_test_indexes_d, test_indexes.data(), sizeof(float2) * num_test_points, cudaMemcpyHostToDevice));\n\n  // TODO amount should depend on the number of query points\n  dim3 threadsPerBlock(num_test_points, 1);\n  dim3 numBlocks(1, 1);\n  textureTransformTestKernel<<<numBlocks, threadsPerBlock>>>(*cost.cost_d_, tex_results_d, tex_test_indexes_d,\n                                                             num_test_points);\n  CudaCheckError();\n  cudaDeviceSynchronize();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(\n      cudaMemcpy(test_results.data(), tex_results_d, sizeof(float4) * num_test_points, cudaMemcpyDeviceToHost));\n\n  cudaDeviceSynchronize();\n\n  cudaFree(tex_results_d);\n  cudaFree(tex_test_indexes_d);\n}\n\ntemplate <typename COST_T>\n__global__ void trackCostTestKernel(COST_T* cost, float3* test_indexes, int num_points, float* cost_results,\n                                    int* crash_results)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num_points)\n  {\n    float state[7];\n    int crash = 0;\n    state[0] = test_indexes[tid].x;\n    state[1] = test_indexes[tid].y;\n    state[2] = test_indexes[tid].z;\n    // printf(\"got test indexes %d, state %f, %f, %f\\n\", tid, state[0], state[1], state[2]);\n    cost_results[tid] = 
cost->getTrackCost(state, &crash);\n    // printf(\"set results %d\\n\", tid);\n    crash_results[tid] = crash;\n    // printf(\"set crash results %d\\n\", tid);\n  }\n}\n\ntemplate <typename COST_T>\nvoid launchTrackCostTestKernel(const COST_T& cost, std::vector<float3>& test_indexes, std::vector<float>& cost_results,\n                               std::vector<int>& crash_results)\n{\n  int num_test_points = test_indexes.size();\n  crash_results.resize(num_test_points);\n  cost_results.resize(num_test_points);\n\n  float* cost_results_d;\n  int* crash_results_d;\n  float3* test_indexes_d;\n  HANDLE_ERROR(cudaMalloc((void**)&cost_results_d, sizeof(float) * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&crash_results_d, sizeof(int) * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&test_indexes_d, sizeof(float3) * num_test_points))\n\n  HANDLE_ERROR(\n      cudaMemcpy(test_indexes_d, test_indexes.data(), sizeof(float3) * num_test_points, cudaMemcpyHostToDevice));\n\n  // TODO amount should depend on the number of query points\n  dim3 threadsPerBlock(num_test_points, 1);\n  dim3 numBlocks(1, 1);\n  trackCostTestKernel<<<numBlocks, threadsPerBlock>>>(cost.cost_d_, test_indexes_d, num_test_points, cost_results_d,\n                                                      crash_results_d);\n  CudaCheckError();\n  cudaDeviceSynchronize();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(\n      cudaMemcpy(cost_results.data(), cost_results_d, sizeof(float) * num_test_points, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(\n      cudaMemcpy(crash_results.data(), crash_results_d, sizeof(int) * num_test_points, cudaMemcpyDeviceToHost));\n\n  cudaDeviceSynchronize();\n\n  cudaFree(cost_results_d);\n  cudaFree(crash_results_d);\n  cudaFree(test_indexes_d);\n}\n\ntemplate <typename COST_T>\n__global__ void terminalCostTestKernel(COST_T& cost, float* test_xu, float* cost_results, int num_points)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < 
num_points)\n  {\n    float* state = &test_xu[tid];\n    cost_results[tid] = cost.terminalCost(state, nullptr);\n  }\n}\n\ntemplate <typename COST_T>\nvoid launchTerminalCostTestKernel(const COST_T& cost, std::vector<std::array<float, 7>>& x,\n                                  std::vector<float>& cost_results)\n{\n  int num_test_points = x.size();\n  cost_results.resize(num_test_points * 7);\n\n  float* cost_results_d;\n  float* u_d;\n  HANDLE_ERROR(cudaMalloc((void**)&cost_results_d, sizeof(float) * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&u_d, sizeof(float) * 7 * num_test_points))\n\n  for (int i = 0; i < num_test_points; i++)\n  {\n    for (int j = 0; j < 7; j++)\n    {\n      cost_results[7 * i + j] = x[i][j];\n    }\n  }\n\n  HANDLE_ERROR(cudaMemcpy(u_d, x.data(), sizeof(float) * 7 * num_test_points, cudaMemcpyHostToDevice));\n\n  // TODO amount should depend on the number of query points\n  dim3 threadsPerBlock(num_test_points, 1);\n  dim3 numBlocks(1, 1);\n  terminalCostTestKernel<<<numBlocks, threadsPerBlock>>>(*cost.cost_d_, u_d, cost_results_d, num_test_points);\n  CudaCheckError();\n  cudaDeviceSynchronize();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(\n      cudaMemcpy(cost_results.data(), cost_results_d, sizeof(float) * num_test_points, cudaMemcpyDeviceToHost));\n\n  cudaDeviceSynchronize();\n\n  cudaFree(cost_results_d);\n  cudaFree(u_d);\n}\n\ntemplate <typename COST_T>\n__global__ void computeCostTestKernel(COST_T* cost, float* test_xu, float* cost_results, int* timestep, int* crash,\n                                      int num_points)\n{\n  // __shared__ float theta_c[COST_T::SHARED_MEM_REQUEST_GRD_BYTES + COST_T::SHARED_MEM_REQUEST_BLK_BYTES * 1024];\n  extern __shared__ float theta_c[];\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num_points)\n  {\n    float* state = &test_xu[tid * 9];\n    float* control = &test_xu[tid * 9 + 7];\n    cost->initializeCosts(state, control, theta_c, 0.0, 0.01);\n    
cost_results[tid] = cost->computeRunningCost(state, control, timestep[tid], theta_c, crash + tid);\n  }\n}\n\ntemplate <typename COST_T>\nvoid launchComputeCostTestKernel(const COST_T& cost, std::vector<std::array<float, 9>>& test_xu,\n                                 std::vector<float>& cost_results, std::vector<int>& timesteps, std::vector<int>& crash)\n{\n  int num_test_points = test_xu.size();\n  cost_results.resize(num_test_points);\n\n  float* cost_results_d;\n  float* test_xu_d;\n  int* timestep_d;\n  int* crash_d;\n  HANDLE_ERROR(cudaMalloc((void**)&cost_results_d, sizeof(float) * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&test_xu_d, sizeof(float) * 9 * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&timestep_d, sizeof(float) * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&crash_d, sizeof(float) * num_test_points))\n\n  HANDLE_ERROR(cudaMemcpy(test_xu_d, test_xu.data(), sizeof(float) * 9 * num_test_points, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(timestep_d, timesteps.data(), sizeof(int) * num_test_points, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(crash_d, crash.data(), sizeof(int) * num_test_points, cudaMemcpyHostToDevice));\n\n  // TODO amount should depend on the number of query points\n  dim3 threadsPerBlock(num_test_points, 1);\n  dim3 numBlocks(1, 1);\n  unsigned shared_mem_size = mppi::kernels::calcClassSharedMemSize<COST_T>(&cost, threadsPerBlock);\n  computeCostTestKernel<<<numBlocks, threadsPerBlock, shared_mem_size>>>(cost.cost_d_, test_xu_d, cost_results_d,\n                                                                         timestep_d, crash_d, num_test_points);\n  CudaCheckError();\n  cudaDeviceSynchronize();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(\n      cudaMemcpy(cost_results.data(), cost_results_d, sizeof(float) * num_test_points, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(crash.data(), crash_d, sizeof(float) * num_test_points, cudaMemcpyDeviceToHost));\n\n  
cudaDeviceSynchronize();\n\n  cudaFree(cost_results_d);\n  cudaFree(test_xu_d);\n  cudaFree(timestep_d);\n  cudaFree(crash_d);\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/cost_functions/autorally/ar_standard_cost_kernel_test.cuh",
    "content": "//\n// Created by jason on 1/8/20.\n//\n\n#ifndef MPPIGENERIC_AR_STANDARD_COST_KERNEL_TEST_CUH\n#define MPPIGENERIC_AR_STANDARD_COST_KERNEL_TEST_CUH\n\n#include <mppi/cost_functions/autorally/ar_standard_cost.cuh>\n\ntemplate <typename COST_T, typename PARAMS_T>\n__global__ void parameterTestKernel(const COST_T* cost, PARAMS_T& params, int& width, int& height);\n\ntemplate <typename COST_T, typename PARAMS_T>\nvoid launchParameterTestKernel(const COST_T& cost, PARAMS_T& params, int& width, int& height);\n\nvoid launchCheckCudaArray(std::vector<float4>& result_arr, cudaArray* array, int number);\n\n__global__ void checkCudaArrayKernel(float4* result_arr, cudaArray* array, int number);\n\ntemplate <typename COST_T>\nvoid launchTransformTestKernel(std::vector<float3>& result, const COST_T& cost);\n\ntemplate <typename COST_T>\n__global__ void transformTestKernel(float3* results, COST_T* cost);\n\ntemplate <typename COST_T>\nvoid launchTextureTestKernel(const COST_T& cost, std::vector<float4>& test_results, std::vector<float2>& test_indexes);\n\ntemplate <typename COST_T>\n__global__ void textureTestKernel(const COST_T& cost, float4* test_results, float2* test_indexes, int num_points);\n\ntemplate <typename COST_T>\nvoid launchTextureTransformTestKernel(const COST_T& cost, std::vector<float4>& test_results,\n                                      std::vector<float2>& test_indexes);\n\ntemplate <typename COST_T>\n__global__ void textureTransformTestKernel(COST_T& cost, float4* test_results, float2* test_indexes, int num_points);\n\ntemplate <typename COST_T>\nvoid launchTrackCostTestKernel(const COST_T& cost, std::vector<float3>& test_indexes, std::vector<float>& cost_results,\n                               std::vector<int>& crash_results);\n\ntemplate <typename COST_T>\n__global__ void trackCostTestKernel(COST_T* cost, float3* test_indexes, int num_points, float* cost_results,\n                                    int* crash_results);\n\ntemplate 
<typename COST_T>\nvoid launchComputeCostTestKernel(const COST_T& cost, std::vector<std::array<float, 9>>& test_xu,\n                                 std::vector<float>& cost_results, std::vector<int>& timesteps,\n                                 std::vector<int>& crash);\n\ntemplate <typename COST_T>\n__global__ void computeCostTestKernel(COST_T* cost, float* test_xu, float* cost_results, int* timestep, int* crash,\n                                      int num_points);\n\n#include \"ar_standard_cost_kernel_test.cu\"\n\n#endif  // MPPIGENERIC_AR_STANDARD_COST_KERNEL_TEST_CUH\n"
  },
  {
    "path": "tests/include/kernel_tests/cost_functions/cartpole/cartpole_quadratic_cost_kernel_test.cu",
    "content": "__global__ void parameterTestKernel(CartpoleQuadraticCost* cost_d, CartpoleQuadraticCostParams& params_d)\n{\n  // The parameters have been set outside of the kernel on the device, copy the current values of the parameters to\n  // params_d\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid == 0)\n  {\n    params_d = cost_d->getParams();\n  }\n}\n\nvoid launchParameterTestKernel(const CartpoleQuadraticCost& cost, CartpoleQuadraticCostParams& param_check)\n{\n  // Allocate memory for the device side parameter structure\n  CartpoleQuadraticCostParams* param_d = nullptr;\n  HANDLE_ERROR(cudaMalloc((void**)&param_d, sizeof(CartpoleQuadraticCostParams)))\n\n  parameterTestKernel<<<1, 1>>>(cost.cost_d_, *param_d);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(&param_check, param_d, sizeof(param_check), cudaMemcpyDeviceToHost))\n\n  cudaFree(param_d);\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/cost_functions/cartpole/cartpole_quadratic_cost_kernel_test.cuh",
    "content": "#ifndef MPPIGENERIC_CARTPOLE_QUADRATIC_COST_KERNEL_TEST_CUH\n#define MPPIGENERIC_CARTPOLE_QUADRATIC_COST_KERNEL_TEST_CUH\n\n#include <mppi/cost_functions/cartpole/cartpole_quadratic_cost.cuh>\n\n/**\n * In this cost function, we are supplied with a device side cost class, and a device side parameter structure\n */\n__global__ void parameterTestKernel(CartpoleQuadraticCost* cost_d, CartpoleQuadraticCostParams* params_d);\n\n/**\n *\n */\nvoid launchParameterTestKernel(const CartpoleQuadraticCost& cost, CartpoleQuadraticCostParams& param_check);\n\n#include \"cartpole_quadratic_cost_kernel_test.cu\"\n\n#endif  //! MPPIGENERIC_CARTPOLE_QUADRATIC_COST_KERNEL_TEST_CUH"
  },
  {
    "path": "tests/include/kernel_tests/cost_functions/cost_generic_kernel_tests.cu",
    "content": "#include \"cost_generic_kernel_tests.cuh\"\n\ntemplate <class COST_T>\n__global__ void computeRunningCostTestKernel(COST_T* __restrict__ cost, const float* __restrict__ y_d,\n                                             const float* __restrict__ u_d, int num_rollouts, int num_timesteps,\n                                             float dt, float* __restrict__ output_cost)\n{\n  const int sample_idx = blockIdx.x;\n  const int t = threadIdx.x;\n  const int max_time_iters = ceilf((float)num_timesteps / blockDim.x);\n  const int sys_index = threadIdx.z + blockDim.z * blockIdx.z;\n\n  extern __shared__ float entire_buffer[];\n  float* y_shared = entire_buffer;\n  float* u_shared = &y_shared[mppi::math::nearest_multiple_4(blockDim.x * blockDim.z * COST_T::OUTPUT_DIM)];\n  // int* crash_shared = (int*)&u_shared[mppi::math::nearest_multiple_4(blockDim.x * blockDim.z * COST_T::CONTROL_DIM)];\n  // float* running_cost_shared = (float*)&crash_shared[mppi::math::nearest_multiple_4(blockDim.x * blockDim.z)];\n  // float* theta_c_shared = &running_cost_shared[mppi::math::nearest_multiple_4(blockDim.x * blockDim.z * blockDim.y)];\n\n  float* running_cost_shared = &u_shared[mppi::math::nearest_multiple_4(blockDim.x * blockDim.z * COST_T::CONTROL_DIM)];\n  int* crash_shared = (int*)&running_cost_shared[mppi::math::nearest_multiple_4(blockDim.x * blockDim.z * blockDim.y)];\n  float* theta_c_shared = (float*)&crash_shared[mppi::math::nearest_multiple_4(blockDim.x * blockDim.z)];\n\n  const int shared_idx = threadIdx.x + blockDim.x * threadIdx.z;\n  const int cost_idx = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);\n  float* y = &y_shared[shared_idx * COST_T::OUTPUT_DIM];\n  float* u = &u_shared[shared_idx * COST_T::CONTROL_DIM];\n  float* running_cost = &running_cost_shared[cost_idx];\n\n  int* crash = &crash_shared[shared_idx];\n  *crash = 0;\n\n  // Fill in shared memory\n  const int global_mem_index = (num_rollouts * sys_index + sample_idx) 
* num_timesteps + t;\n  mp1::loadArrayParallel<COST_T::OUTPUT_DIM>(y, 0, y_d, global_mem_index * COST_T::OUTPUT_DIM);\n  mp1::loadArrayParallel<COST_T::CONTROL_DIM>(u, 0, u_d, global_mem_index * COST_T::CONTROL_DIM);\n  __syncthreads();\n\n  cost->initializeCosts(y, u, theta_c_shared, 0, dt);\n  __syncthreads();\n  running_cost[0] = cost->computeRunningCost(y, u, t, theta_c_shared, crash);\n\n  __syncthreads();\n  // if (threadIdx.y == 0)\n  // {\n  //   int num_zeros = 0;\n  //   for (int i = 0; i < blockDim.y; i++)\n  //   {\n  //     if (running_cost[i * blockDim.x] == 0)\n  //     {\n  //       num_zeros++;\n  //     }\n  //   }\n  //   if (num_zeros != blockDim.y - 1)\n  //   {\n  //     printf(\"Sample %d, t %d block_y %d: \", sample_idx, t, blockDim.y);\n  //     for (int i = 0; i < blockDim.y; i++)\n  //     {\n  //       printf(\"%f, \", running_cost[i * blockDim.x]);\n  //     }\n  //     printf(\"\\n\");\n  //   }\n  // }\n  // if (sample_idx == 0 && t == 0 && blockDim.y < 4)\n  // {\n  //   printf(\"Cost y %d: %f\\n\", threadIdx.y, running_cost[0]);\n  // }\n  // __syncthreads();\n\n  running_cost = &running_cost_shared[threadIdx.x + blockDim.x * blockDim.y * threadIdx.z];\n  __syncthreads();\n  mppi::kernels::costArrayReduction(running_cost, blockDim.y, threadIdx.y, blockDim.y, threadIdx.y == 0, blockDim.x);\n  // int prev_size = blockDim.y;\n  // // Allow for better computation when blockDim.x is a power of 2\n  // const bool block_power_of_2 = (prev_size & (prev_size - 1)) == 0;\n  // const int stop_condition = (block_power_of_2) ? 
32 : 0;\n  // int size;\n  // const int xy_index = threadIdx.y;\n  // const int xy_step = blockDim.y;\n\n  // for (size = prev_size / 2; size > stop_condition; size /= 2)\n  // {\n  //   for (int j = xy_index; j < size; j += xy_step)\n  //   {\n  //     running_cost[j * blockDim.x] += running_cost[(j + size) * blockDim.x];\n  //   }\n  //   __syncthreads();\n  //   if (prev_size - 2 * size == 1 && xy_index == 0)\n  //   {\n  //     running_cost[(size - 1) * blockDim.x] += running_cost[(prev_size - 1) * blockDim.x];\n  //   }\n  //   __syncthreads();\n  //   prev_size = size;\n  // }\n\n  // if (xy_index < 32 && stop_condition != 0)\n  // {  // unroll the last warp\n  //   switch (size * 2)\n  //   {\n  //     case 64:\n  //       mppi::kernels::warpReduceAdd<64>(running_cost, xy_index, blockDim.x);\n  //       break;\n  //     case 32:\n  //       mppi::kernels::warpReduceAdd<32>(running_cost, xy_index, blockDim.x);\n  //       break;\n  //     case 16:\n  //       mppi::kernels::warpReduceAdd<16>(running_cost, xy_index, blockDim.x);\n  //       break;\n  //     case 8:\n  //       mppi::kernels::warpReduceAdd<8>(running_cost, xy_index, blockDim.x);\n  //       break;\n  //     case 4:\n  //       mppi::kernels::warpReduceAdd<4>(running_cost, xy_index, blockDim.x);\n  //       break;\n  //     case 2:\n  //       mppi::kernels::warpReduceAdd<2>(running_cost, xy_index, blockDim.x);\n  //       break;\n  //     case 1:\n  //       mppi::kernels::warpReduceAdd<1>(running_cost, xy_index, blockDim.x);\n  //       break;\n  //   }\n  // }\n  // __syncthreads();\n  // __syncthreads();\n  // if (sample_idx == 0 && t == 0 && blockDim.y < 4)\n  // {\n  //   printf(\"Final Cost y %d: %f\\n\", threadIdx.y, running_cost[0]);\n  // }\n  __syncthreads();\n  if (threadIdx.y == 0)\n  {\n    output_cost[global_mem_index] = running_cost[0];\n  }\n}\n\ntemplate <class COST_T>\n__global__ void computeTerminalCostTestKernel(COST_T* __restrict__ cost, const float* __restrict__ y_d,\n     
                                         int num_rollouts, float dt, float* __restrict__ output_cost)\n{\n  const int sample_idx = threadIdx.x + blockIdx.x * blockDim.x;\n  const int sys_index = threadIdx.z + blockDim.z * blockIdx.z;\n\n  extern __shared__ float entire_buffer[];\n  float* y_shared = entire_buffer;\n  float* u_shared = &y_shared[mppi::math::nearest_multiple_4(blockDim.x * blockDim.z * COST_T::OUTPUT_DIM)];\n  float* theta_c_shared = &u_shared[mppi::math::nearest_multiple_4(blockDim.x * blockDim.z * COST_T::CONTROL_DIM)];\n\n  const int shared_idx = threadIdx.x + blockDim.x * threadIdx.z;\n  float* y = &y_shared[shared_idx * COST_T::OUTPUT_DIM];\n  float* u = &u_shared[shared_idx * COST_T::CONTROL_DIM];\n\n  // Fill in shared memory\n  const int global_mem_index = num_rollouts * sys_index + sample_idx;\n  mp1::loadArrayParallel<mp1::Parallel1Dir::THREAD_Y>(y, 0, y_d, global_mem_index * COST_T::OUTPUT_DIM);\n  for (int i = threadIdx.y; i < COST_T::CONTROL_DIM; i += blockDim.y)\n  {\n    u[i] = 0.0f;\n  }\n  __syncthreads();\n\n  cost->initializeCosts(y, u, theta_c_shared, 0, dt);\n  __syncthreads();\n  output_cost[global_mem_index] = cost->terminalCost(y, theta_c_shared);\n}\n\ntemplate <class COST_T>\nvoid launchRunningCostTestKernel(COST_T& cost, std::vector<std::array<float, COST_T::OUTPUT_DIM>>& y,\n                                 std::vector<std::array<float, COST_T::CONTROL_DIM>>& u, int num_rollouts,\n                                 int num_timesteps, float dt, int dim_y, std::vector<float>& output_costs)\n{\n  if (y.size() != num_rollouts * num_timesteps)\n  {\n    std::cout << \"Number of outputs does not match num_rollouts * num_timesteps. Output: \" << y.size()\n              << \", num_rollouts: \" << num_rollouts << \", num_timesteps: \" << num_timesteps << std::endl;\n    exit(-1);\n  }\n  if (u.size() != y.size())\n  {\n    std::cout << \"Number of controls does not match number of outputs. 
Output: \" << y.size()\n              << \", Control: \" << u.size() << std::endl;\n    exit(-1);\n  }\n  dim3 block_dim = dim3(num_timesteps, dim_y, 1);\n  dim3 grid_dim = dim3(num_rollouts, 1, 1);\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  // Global Memory setup\n  float* y_d;\n  float* u_d;\n  float* output_costs_d;\n  HANDLE_ERROR(cudaMalloc((void**)&y_d, sizeof(float) * num_rollouts * num_timesteps * COST_T::OUTPUT_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&u_d, sizeof(float) * num_rollouts * num_timesteps * COST_T::CONTROL_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&output_costs_d, sizeof(float) * num_rollouts * num_timesteps));\n\n  // Copy data to GPU\n  for (int k = 0; k < num_rollouts; k++)\n  {\n    for (int t = 0; t < num_timesteps; t++)\n    {\n      const int index = k * num_timesteps + t;\n      HANDLE_ERROR(cudaMemcpyAsync(y_d + index * COST_T::OUTPUT_DIM, y[index].data(),\n                                   sizeof(float) * COST_T::OUTPUT_DIM, cudaMemcpyHostToDevice, stream));\n      HANDLE_ERROR(cudaMemcpyAsync(u_d + index * COST_T::CONTROL_DIM, u[index].data(),\n                                   sizeof(float) * COST_T::CONTROL_DIM, cudaMemcpyHostToDevice, stream));\n    }\n  }\n\n  // Figure shared memory size\n  const int block_num_shared = block_dim.x * block_dim.z;\n  const int block_cost_shared_mem_size =\n      mppi::math::int_multiple_const(COST_T::SHARED_MEM_REQUEST_GRD_BYTES, sizeof(float4)) +\n      block_num_shared * mppi::math::int_multiple_const(COST_T::SHARED_MEM_REQUEST_BLK_BYTES, sizeof(float4));\n  unsigned compute_cost_shared_mem_size = (mppi::math::nearest_multiple_4(block_num_shared * COST_T::OUTPUT_DIM) +\n                                           mppi::math::nearest_multiple_4(block_num_shared * COST_T::CONTROL_DIM) +\n                                           mppi::math::nearest_multiple_4(block_num_shared * dim_y)) *\n                                              sizeof(float) +\n              
                            mppi::math::nearest_multiple_4(block_num_shared) * sizeof(int) +\n                                          block_cost_shared_mem_size;\n  // Launch kernel\n  computeRunningCostTestKernel<COST_T><<<grid_dim, block_dim, compute_cost_shared_mem_size, stream>>>(\n      cost.cost_d_, y_d, u_d, num_rollouts, num_timesteps, dt, output_costs_d);\n\n  // Copy memory back to CPU\n  output_costs.resize(num_rollouts * num_timesteps);\n  HANDLE_ERROR(cudaMemcpyAsync(output_costs.data(), output_costs_d, sizeof(float) * num_rollouts * num_timesteps,\n                               cudaMemcpyDeviceToHost, stream));\n\n  // Free memory\n  HANDLE_ERROR(cudaFree(y_d));\n  HANDLE_ERROR(cudaFree(u_d));\n  HANDLE_ERROR(cudaFree(output_costs_d));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n}\n\ntemplate <class COST_T, class SAMPLING_T>\nvoid launchRolloutCostKernel(COST_T& cost, SAMPLING_T& sampler, std::vector<std::array<float, COST_T::OUTPUT_DIM>>& y,\n                             int num_rollouts, int num_timesteps, float dt, int dim_y, std::vector<float>& output_costs)\n{\n  if (y.size() != num_rollouts * num_timesteps)\n  {\n    std::cout << \"Number of outputs does not match num_rollouts * num_timesteps. 
Output: \" << y.size()\n              << \", num_rollouts: \" << num_rollouts << \", num_timesteps: \" << num_timesteps << std::endl;\n    exit(-1);\n  }\n  dim3 block_dim = dim3(num_timesteps, dim_y, 1);\n  dim3 grid_dim = dim3(num_rollouts, 1, 1);\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n  sampler.bindToStream(stream);\n\n  // Global Memory setup\n  float* y_d;\n  float* output_costs_d;\n  HANDLE_ERROR(cudaMalloc((void**)&y_d, sizeof(float) * num_rollouts * num_timesteps * COST_T::OUTPUT_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&output_costs_d, sizeof(float) * num_rollouts * num_timesteps));\n\n  // Copy data to GPU\n  for (int k = 0; k < num_rollouts; k++)\n  {\n    for (int t = 0; t < num_timesteps; t++)\n    {\n      const int index = k * num_timesteps + t;\n      HANDLE_ERROR(cudaMemcpyAsync(y_d + index * COST_T::OUTPUT_DIM, y[index].data(),\n                                   sizeof(float) * COST_T::OUTPUT_DIM, cudaMemcpyHostToDevice, stream));\n    }\n  }\n\n  // Figure shared memory size\n  // const int block_num_shared = block_dim.x * block_dim.z;\n  unsigned compute_cost_shared_mem_size = mppi::kernels::calcRolloutCostKernelSharedMemSize(&cost, &sampler, block_dim);\n  //       sizeof(float) * (mppi::math::nearest_multiple_4(block_num_shared * COST_T::OUTPUT_DIM) +\n  //                        mppi::math::nearest_multiple_4(block_num_shared * COST_T::CONTROL_DIM) +\n  //                        mppi::math::nearest_multiple_4(block_num_shared * block_dim.y)) +\n  //       sizeof(int) * mppi::math::nearest_multiple_4(block_num_shared) +\n  //       mppi::math::int_multiple_const(COST_T::SHARED_MEM_REQUEST_GRD_BYTES, sizeof(float4)) +\n  //       block_num_shared * mppi::math::int_multiple_const(COST_T::SHARED_MEM_REQUEST_BLK_BYTES, sizeof(float4)) +\n  //       mppi::math::int_multiple_const(SAMPLING_T::SHARED_MEM_REQUEST_GRD_BYTES, sizeof(float4)) +\n  //       block_num_shared * 
mppi::math::int_multiple_const(SAMPLING_T::SHARED_MEM_REQUEST_BLK_BYTES, sizeof(float4));\n  // #ifdef USE_CUDA_BARRIERS_COST\n  //   compute_cost_shared_mem_size += mppi::math::int_multiple_const(block_num_shared * sizeof(barrier), 16);\n  // #endif\n  // Launch kernel\n  mppi::kernels::rolloutCostKernel<COST_T, SAMPLING_T><<<grid_dim, block_dim, compute_cost_shared_mem_size, stream>>>(\n      cost.cost_d_, sampler.sampling_d_, dt, num_timesteps, num_rollouts, 1.0, 0.0, y_d, output_costs_d);\n\n  // Copy memory back to CPU\n  output_costs.resize(num_rollouts);\n  HANDLE_ERROR(cudaMemcpyAsync(output_costs.data(), output_costs_d, sizeof(float) * num_rollouts,\n                               cudaMemcpyDeviceToHost, stream));\n\n  // Free memory\n  HANDLE_ERROR(cudaFree(y_d));\n  HANDLE_ERROR(cudaFree(output_costs_d));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n}\n\ntemplate <class COST_T>\nvoid launchTerminalCostTestKernel(COST_T& cost, std::vector<std::array<float, COST_T::OUTPUT_DIM>>& y, float dt,\n                                  int dim_x, int dim_y, std::vector<float>& output_costs)\n{\n  const int num_rollouts = y.size();\n  dim3 block_dim = dim3(dim_x, dim_y, 1);\n  dim3 grid_dim = dim3(num_rollouts, 1, 1);\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  // Global Memory setup\n  float* y_d;\n  float* output_costs_d;\n  HANDLE_ERROR(cudaMalloc((void**)&y_d, sizeof(float) * num_rollouts * COST_T::OUTPUT_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&output_costs_d, sizeof(float) * num_rollouts));\n\n  // Copy data to GPU\n  for (int k = 0; k < num_rollouts; k++)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(y_d + k * COST_T::OUTPUT_DIM, y[k].data(), sizeof(float) * COST_T::OUTPUT_DIM,\n                                 cudaMemcpyHostToDevice, stream));\n  }\n\n  // Figure shared memory size\n  const int block_num_shared = block_dim.x * block_dim.z;\n  const int block_cost_shared_mem_size =\n      
mppi::math::int_multiple_const(COST_T::SHARED_MEM_REQUEST_GRD_BYTES, sizeof(float4)) +\n      block_num_shared * mppi::math::int_multiple_const(COST_T::SHARED_MEM_REQUEST_BLK_BYTES, sizeof(float4));\n  unsigned compute_cost_shared_mem_size = (mppi::math::nearest_multiple_4(block_num_shared * COST_T::OUTPUT_DIM) +\n                                           mppi::math::nearest_multiple_4(block_num_shared * COST_T::CONTROL_DIM)) *\n                                              sizeof(float) +\n                                          block_cost_shared_mem_size;\n  // Launch kernel\n  computeTerminalCostTestKernel<COST_T><<<grid_dim, block_dim, compute_cost_shared_mem_size, stream>>>(\n      cost.cost_d_, y_d, num_rollouts, dt, output_costs_d);\n\n  // Copy memory back to CPU\n  HANDLE_ERROR(cudaMemcpyAsync(output_costs.data(), output_costs_d, sizeof(float) * num_rollouts,\n                               cudaMemcpyDeviceToHost, stream));\n\n  // Free memory\n  HANDLE_ERROR(cudaFree(y_d));\n  HANDLE_ERROR(cudaFree(output_costs_d));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n}\n\ntemplate <class COST_T>\nvoid checkGPURolloutCost(COST_T& cost, float dt)\n{\n  cost.GPUSetup();\n  const int num_rollouts = 512;\n  const int num_timesteps = 8;\n#ifdef USE_ROLLOUT_COST_KERNEL\n  using SAMPLER_T = mppi::sampling_distributions::GaussianDistribution<typename COST_T::TEMPLATED_DYN_PARAMS>;\n  using SAMPLER_PARAMS = typename SAMPLER_T::SAMPLING_PARAMS_T;\n  SAMPLER_PARAMS params;\n  for (int i = 0; i < COST_T::CONTROL_DIM; i++)\n  {\n    params.std_dev[i] = 1.0;\n    params.control_cost_coeff[i] = 0.0;\n  }\n  params.num_rollouts = num_rollouts;\n  params.num_timesteps = num_timesteps;\n  SAMPLER_T sampler(params);\n  sampler.GPUSetup();\n\n  // Curand generator\n  cudaStream_t stream;\n  cudaStreamCreate(&stream);\n  curandGenerator_t gen;\n  int seed = 42;\n  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);\n  curandSetPseudoRandomGeneratorSeed(gen, seed);\n  
curandSetGeneratorOffset(gen, 0);\n  curandSetStream(gen, stream);\n  sampler.bindToStream(stream);\n#endif\n\n  using control_trajectory = Eigen::Matrix<float, COST_T::CONTROL_DIM, num_timesteps>;\n  using output_trajectory = Eigen::Matrix<float, COST_T::OUTPUT_DIM, num_timesteps>;\n  std::vector<std::array<float, COST_T::OUTPUT_DIM>> y(num_timesteps * num_rollouts);\n#ifndef USE_ROLLOUT_COST_KERNEL\n  std::vector<std::array<float, COST_T::CONTROL_DIM>> u(num_timesteps * num_rollouts);\n#endif\n\n  std::vector<float> output_cost_gpu;\n\n  for (int n = 0; n < num_rollouts; n++)\n  {\n#ifndef USE_ROLLOUT_COST_KERNEL\n    control_trajectory u_traj = control_trajectory::Random();\n#endif\n    output_trajectory y_traj = output_trajectory::Random();\n    for (int t = 0; t < num_timesteps; t++)\n    {\n      int index = t + n * num_timesteps;\n      for (int i = 0; i < COST_T::OUTPUT_DIM; i++)\n      {\n        y[index][i] = y_traj(i, t);\n      }\n#ifndef USE_ROLLOUT_COST_KERNEL\n      for (int i = 0; i < COST_T::CONTROL_DIM; i++)\n      {\n        u[index][i] = u_traj(i, t);\n      }\n#endif\n    }\n  }\n\n#ifdef USE_ROLLOUT_COST_KERNEL\n  sampler.generateSamples(0, 0, gen, true);\n#endif\n\n  for (int y_dim = 1; y_dim < 32; y_dim++)\n  {\n#ifdef USE_ROLLOUT_COST_KERNEL\n    launchRolloutCostKernel<COST_T, SAMPLER_T>(cost, sampler, y, num_rollouts, num_timesteps, dt, y_dim,\n                                               output_cost_gpu);\n#else\n    launchRunningCostTestKernel<COST_T>(cost, y, u, num_rollouts, num_timesteps, dt, y_dim, output_cost_gpu);\n#endif\n    for (int n = 0; n < num_rollouts; n++)\n    {\n#ifdef USE_ROLLOUT_COST_KERNEL\n      control_trajectory u_traj_n;\n      HANDLE_ERROR(cudaMemcpy(u_traj_n.data(), sampler.getControlSample(n, 0, 0),\n                              sizeof(float) * num_timesteps * COST_T::CONTROL_DIM, cudaMemcpyDeviceToHost));\n      float total_cost = 0;\n      for (int t = 0; t < num_timesteps; t++)\n      {\n        int 
index = t + n * num_timesteps;\n        typename COST_T::control_array u_index = u_traj_n.col(t);\n        Eigen::Map<typename COST_T::output_array> y_index(y[index].data());\n#else\n      for (int t = 0; t < num_timesteps; t++)\n      {\n        int index = t + n * num_timesteps;\n        Eigen::Map<typename COST_T::control_array> u_index(u[index].data());\n        Eigen::Map<typename COST_T::output_array> y_index(y[index].data());\n#endif\n\n        int crash = 0;\n        float cpu_cost = cost.computeRunningCost(y_index, u_index, t, &crash);\n#ifdef USE_ROLLOUT_COST_KERNEL\n        cpu_cost += sampler.computeLikelihoodRatioCost(u_index, n, t, 0);\n        total_cost += cpu_cost;\n#else\n        ASSERT_NEAR(cpu_cost, output_cost_gpu[index], cpu_cost * 0.0015)\n            << \" Sample \" << n << \", t \" << t << \" y_dim \" << y_dim;\n#endif\n      }\n#ifdef USE_ROLLOUT_COST_KERNEL\n      total_cost /= num_timesteps;\n      Eigen::Map<typename COST_T::output_array> y_index(y[(n + 1) * num_timesteps - 1].data());\n      total_cost += cost.terminalCost(y_index) / num_timesteps;\n      ASSERT_NEAR(total_cost, output_cost_gpu[n], total_cost * 0.001) << \" Sample \" << n << \" y_dim \" << y_dim;\n#endif\n    }\n  }\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/cost_functions/cost_generic_kernel_tests.cuh",
    "content": "#pragma once\n\n#include <mppi/utils/math_utils.h>\n#include <mppi/core/mppi_common.cuh>\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n\n#include <array>\n#include <iostream>\n#include <vector>\n\nnamespace mp1 = ::mppi::p1;\n#define USE_ROLLOUT_COST_KERNEL\n\ntemplate <class COST_T>\n__global__ void computeRunningCostTestKernel(COST_T* __restrict__ cost, const float* __restrict__ y_d,\n                                             const float* __restrict__ u_d, int num_rollouts, int num_timesteps,\n                                             float dt, float* __restrict__ output_cost);\n\ntemplate <class COST_T>\n__global__ void computeTerminalCostTestKernel(COST_T* __restrict__ cost, const float* __restrict__ y_d,\n                                              int num_rollouts, float dt, float* __restrict__ output_cost);\n\ntemplate <class COST_T>\nvoid launchRunningCostTestKernel(COST_T& cost, std::vector<std::array<float, COST_T::OUTPUT_DIM>>& y,\n                                 std::vector<std::array<float, COST_T::CONTROL_DIM>>& u, int num_rollouts,\n                                 int num_timesteps, float dt, int dim_y, std::vector<float>& output_costs);\n\ntemplate <class COST_T, class SAMPLING_T>\nvoid launchRolloutCostKernel(COST_T& cost, SAMPLING_T& sampler, std::vector<std::array<float, COST_T::OUTPUT_DIM>>& y,\n                             int num_rollouts, int num_timesteps, float dt, int dim_y,\n                             std::vector<float>& output_costs);\n\ntemplate <class COST_T>\nvoid launchTerminalCostTestKernel(COST_T& cost, std::vector<std::array<float, COST_T::OUTPUT_DIM>>& y, float dt,\n                                  int dim_x, int dim_y, std::vector<float>& output_costs);\n\ntemplate <class COST_T>\nvoid checkGPURolloutCost(COST_T& cost, float dt);\n\n#ifdef __CUDACC__\n#include \"cost_generic_kernel_tests.cu\"\n#endif\n"
  },
  {
    "path": "tests/include/kernel_tests/dynamics/autorally/ar_nn_dynamics_kernel_test.cu",
    "content": "template <class NETWORK_T, int THETA_SIZE, int STRIDE_SIZE, int NUM_LAYERS>\n__global__ void parameterCheckTestKernel(NETWORK_T* model, float* theta, int* stride, int* net_structure)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid == 0)\n  {\n    for (int i = 0; i < THETA_SIZE; i++)\n    {\n      theta[i] = model->getThetaPtr()[i];\n    }\n    for (int i = 0; i < STRIDE_SIZE; i++)\n    {\n      stride[i] = model->getStrideIdcsPtr()[i];\n    }\n    for (int i = 0; i < NUM_LAYERS; i++)\n    {\n      net_structure[i] = model->getNetStructurePtr()[i];\n    }\n  }\n}\n\ntemplate <class NETWORK_T, int THETA_SIZE, int STRIDE_SIZE, int NUM_LAYERS>\nvoid launchParameterCheckTestKernel(NETWORK_T& model, std::array<float, THETA_SIZE>& theta,\n                                    std::array<int, STRIDE_SIZE>& stride, std::array<int, NUM_LAYERS>& net_structure)\n{\n  float* theta_d;\n  int* stride_d;\n  int* net_structure_d;\n\n  HANDLE_ERROR(cudaMalloc((void**)&theta_d, sizeof(float) * theta.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&stride_d, sizeof(int) * stride.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&net_structure_d, sizeof(int) * net_structure.size()))\n\n  dim3 threadsPerBlock(1, 1);\n  dim3 numBlocks(1, 1);\n  parameterCheckTestKernel<NETWORK_T, THETA_SIZE, STRIDE_SIZE, NUM_LAYERS>\n      <<<numBlocks, threadsPerBlock>>>(model.model_d_, theta_d, stride_d, net_structure_d);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(theta.data(), theta_d, sizeof(float) * theta.size(), cudaMemcpyDeviceToHost))\n  HANDLE_ERROR(cudaMemcpy(stride.data(), stride_d, sizeof(int) * stride.size(), cudaMemcpyDeviceToHost))\n  HANDLE_ERROR(\n      cudaMemcpy(net_structure.data(), net_structure_d, sizeof(int) * net_structure.size(), cudaMemcpyDeviceToHost))\n  cudaDeviceSynchronize();\n\n  cudaFree(theta_d);\n  cudaFree(stride_d);\n  cudaFree(net_structure_d);\n}\n\ntemplate <class NETWORK_T, int S_DIM, int C_DIM, int BLOCKDIM_X, int 
BLOCKDIM_Z>\n__global__ void fullARNNTestKernel(NETWORK_T* model, float* state, float* control, float* state_der, float dt)\n{\n  __shared__ float theta[NETWORK_T::SHARED_MEM_REQUEST_GRD_BYTES / sizeof(float) + 1 +\n                         NETWORK_T::SHARED_MEM_REQUEST_BLK_BYTES * BLOCKDIM_X * BLOCKDIM_Z];\n  __shared__ float output[S_DIM * BLOCKDIM_X * BLOCKDIM_Z];\n  __shared__ float s_der[S_DIM * BLOCKDIM_X * BLOCKDIM_Z];\n\n  model->initializeDynamics(state, control, output, theta, 0.0f, 0.0f);\n\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  // calls enforce constraints -> compute state derivative -> increment state\n\n  // printf(\"before enforceConstraints %d, %d\\n\", threadIdx.x, threadIdx.y);\n  // printf(\"enforceConstraints %d, %d\\n\", threadIdx.x, threadIdx.y);\n  // printf(\"before enforce Constraints %f, %f\\n\", (control+(tid*C_DIM))[0], (control+(tid*C_DIM))[1]);\n  model->enforceConstraints(state + (tid * S_DIM), control + (tid * C_DIM));\n  __syncthreads();\n  // printf(\"after enforce Constraints %f, %f\\n\", (control+(tid*C_DIM))[0], (control+(tid*C_DIM))[1]);\n  model->computeStateDeriv(state + (tid * S_DIM), control + (tid * C_DIM), state_der + (tid * S_DIM), theta);\n  __syncthreads();\n\n  int shared_indexer = BLOCKDIM_X * threadIdx.z + threadIdx.x;\n  for (int i = threadIdx.y; i < S_DIM; i += blockDim.y)\n  {\n    // printf(\"index for shared %d, %d\\n\", S_DIM*shared_indexer + i, S_DIM*tid+i);\n    s_der[S_DIM * shared_indexer + i] = state_der[S_DIM * tid + i];\n  }\n  __syncthreads();\n\n  // if(threadIdx.y == 0) {\n  // printf(\"state_der = %f, %f, %f, %f, %f, %f, %f\\n\", state_der[0], state_der[1], state_der[2], state_der[3],\n  // state_der[4], state_der[5], state_der[6]); printf(\"state = %f, %f, %f, %f, %f, %f, %f\\n\", state[0], state[1],\n  // state[2], state[3], state[4], state[5], state[6]); printf(\"s_der = %f, %f, %f, %f, %f, %f, %f\\n\", s_der[0],\n  // s_der[1], s_der[2], s_der[3], s_der[4], s_der[5], s_der[6]);\n 
 //}\n\n  model->updateState(state + (tid * S_DIM), s_der + (shared_indexer * S_DIM), dt);\n}\n\ntemplate <class NETWORK_T, int S_DIM, int C_DIM, int BLOCKDIM_X = 1, int BLOCKDIM_Z = 1>\nvoid launchFullARNNTestKernel(NETWORK_T& model, std::vector<std::array<float, S_DIM>>& state,\n                              std::vector<std::array<float, C_DIM>>& control,\n                              std::vector<std::array<float, S_DIM>>& state_der, float dt, int dim_y)\n{\n  float* state_d;\n  float* state_der_d;\n  float* control_d;\n\n  HANDLE_ERROR(cudaMalloc((void**)&state_d, sizeof(float) * S_DIM * state.size() * BLOCKDIM_Z))\n  HANDLE_ERROR(cudaMalloc((void**)&state_der_d, sizeof(float) * S_DIM * state_der.size() * BLOCKDIM_Z))\n  HANDLE_ERROR(cudaMalloc((void**)&control_d, sizeof(float) * C_DIM * control.size() * BLOCKDIM_Z))\n\n  HANDLE_ERROR(cudaMemcpy(state_d, state.data(), sizeof(float) * S_DIM * state.size(), cudaMemcpyHostToDevice))\n  HANDLE_ERROR(\n      cudaMemcpy(state_der_d, state_der.data(), sizeof(float) * S_DIM * state_der.size(), cudaMemcpyHostToDevice))\n  HANDLE_ERROR(cudaMemcpy(control_d, control.data(), sizeof(float) * C_DIM * control.size(), cudaMemcpyHostToDevice))\n\n  // make sure you cannot use invalid inputs\n  dim3 threadsPerBlock(state.size(), dim_y, BLOCKDIM_Z);\n  dim3 numBlocks(1, 1);\n  // launch kernel\n  fullARNNTestKernel<NETWORK_T, S_DIM, C_DIM, BLOCKDIM_X, BLOCKDIM_Z>\n      <<<numBlocks, threadsPerBlock>>>(model.model_d_, state_d, control_d, state_der_d, dt);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(state.data(), state_d, sizeof(float) * S_DIM * state.size(), cudaMemcpyDeviceToHost))\n  HANDLE_ERROR(\n      cudaMemcpy(state_der.data(), state_der_d, sizeof(float) * S_DIM * state_der.size(), cudaMemcpyDeviceToHost))\n  HANDLE_ERROR(cudaMemcpy(control.data(), control_d, sizeof(float) * C_DIM * control.size(), cudaMemcpyDeviceToHost))\n  cudaDeviceSynchronize();\n\n  cudaFree(state_d);\n  cudaFree(state_der_d);\n  
cudaFree(control_d);\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/dynamics/autorally/ar_nn_dynamics_kernel_test.cuh",
    "content": "#ifndef AR_NN_DYNAMICS_KERNEL_TEST_CUH\n#define AR_NN_DYNAMICS_KERNEL_TEST_CUH\n\n#include <array>\n#include <mppi/dynamics/autorally/ar_nn_model.cuh>\n#include \"../dynamics_generic_kernel_tests.cuh\"\n#include <cuda_runtime.h>\n\ntemplate <class NETWORK_T, int THETA_SIZE, int STRIDE_SIZE, int NUM_LAYERS>\nvoid launchParameterCheckTestKernel(NETWORK_T& model, std::array<float, THETA_SIZE>& theta,\n                                    std::array<int, STRIDE_SIZE>& stride, std::array<int, NUM_LAYERS>& net_structure);\ntemplate <class NETWORK_T, int THETA_SIZE, int STRIDE_SIZE, int NUM_LAYERS>\n__global__ void parameterCheckTestKernel(NETWORK_T* model, float* theta, int* stride, int* net_structure);\n\ntemplate <class NETWORK_T, int S_DIM, int C_DIM>\nvoid launchFullARNNTestKernel(NETWORK_T& model, std::vector<std::array<float, S_DIM>>& state,\n                              std::vector<std::array<float, C_DIM>>& control,\n                              std::vector<std::array<float, S_DIM>>& state_der, float dt, int dim_y);\ntemplate <class NETWORK_T, int S_DIM, int C_DIM>\n__global__ void fullARNNTestKernel(NETWORK_T* model, float* state, float* control, float* state_der, float dt);\n\n#include \"ar_nn_dynamics_kernel_test.cu\"\n\n#endif  // AR_NN_DYNAMICS_KERNEL_TEST_CUH\n"
  },
  {
    "path": "tests/include/kernel_tests/dynamics/cartpole/cartpole_dynamics_kernel_test.cu",
    "content": "//\n// Created by mgandhi3 on 1/7/20.\n//\n/**\n * Kernels to test device functions\n */\n#include \"cartpole_dynamics_kernel_test.cuh\"\n\n__global__ void CartMassTestKernel(CartpoleDynamics* CP, float& mass_check)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  // printf(\"\\nEntering the kernel!\\n\");\n  // printf(\"The thread id is: %i\\n\", tid);\n  if (tid == 0)\n  {\n    // printf(\"This is gravity: %f\\n\", CP->getGravity());\n    // printf(\"This is the mass of the cart: %f\\n\", CP->getCartMass());\n    // printf(\"This is the mass of the pole: %f\\n\", CP->getPoleMass());\n    // printf(\"This is the length of the pole: %f\\n\", CP->getPoleLength());\n    // printf(\"This is the value of GPUMemstatus on the GPU: %d\\n\", CP->GPUMemStatus_);\n    // printf(\"This is the value of CP_device on the GPU: %d\\n\", CP->CP_device);\n    mass_check = CP->getCartMass();\n  }\n}\n\n__global__ void PoleMassTestKernel(CartpoleDynamics* CP, float& mass_check)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid == 0)\n  {\n    mass_check = CP->getPoleMass();\n  }\n}\n\n__global__ void GravityTestKernel(CartpoleDynamics* CP, float& gravity_check)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid == 0)\n  {\n    gravity_check = CP->getGravity();\n  }\n}\n\n__global__ void PoleLengthTestKernel(CartpoleDynamics* CP, float& length_check)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid == 0)\n  {\n    length_check = CP->getPoleLength();\n  }\n}\n\n__global__ void DynamicsTestKernel(CartpoleDynamics* CP, float* state, float* control, float* state_der)\n{\n  // int tid = blockIdx.x*blockDim.x + threadIdx.x;\n  /**\n   * This will probably do stupid things because of parallelization\n   * Fix later\n   */\n  CP->computeDynamics(state, control, state_der);\n}\n\n/**\n * Wrapper for kernels\n */\n\nvoid launchCartMassTestKernel(const CartpoleDynamics& CP, float& mass_check)\n{\n  // Allocate memory on the 
CPU for checking the mass\n  float* mass_check_device;\n  HANDLE_ERROR(cudaMalloc((void**)&mass_check_device, sizeof(float)));\n\n  CartMassTestKernel<<<1, 1>>>(CP.model_d_, *mass_check_device);\n  CudaCheckError();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(&mass_check, mass_check_device, sizeof(float), cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(mass_check_device);\n}\n\nvoid launchPoleMassTestKernel(const CartpoleDynamics& CP, float& mass_check)\n{\n  // Allocate memory on the CPU for checking the mass\n  float* mass_check_device;\n  HANDLE_ERROR(cudaMalloc((void**)&mass_check_device, sizeof(float)));\n\n  PoleMassTestKernel<<<1, 1>>>(CP.model_d_, *mass_check_device);\n  CudaCheckError();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(&mass_check, mass_check_device, sizeof(float), cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(mass_check_device);\n}\n\nvoid launchPoleLengthTestKernel(const CartpoleDynamics& CP, float& length_check)\n{\n  // Allocate memory on the CPU for checking the mass\n  float* length_check_device;\n  HANDLE_ERROR(cudaMalloc((void**)&length_check_device, sizeof(float)));\n\n  PoleLengthTestKernel<<<1, 1>>>(CP.model_d_, *length_check_device);\n  CudaCheckError();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(&length_check, length_check_device, sizeof(float), cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(length_check_device);\n}\n\nvoid launchGravityTestKernel(const CartpoleDynamics& CP, float& gravity_check)\n{\n  // Allocate memory on the CPU for checking the mass\n  float* gravity_check_device;\n  HANDLE_ERROR(cudaMalloc((void**)&gravity_check_device, sizeof(float)));\n\n  GravityTestKernel<<<1, 1>>>(CP.model_d_, *gravity_check_device);\n  CudaCheckError();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(&gravity_check, gravity_check_device, sizeof(float), cudaMemcpyDeviceToHost));\n  
cudaDeviceSynchronize();\n\n  cudaFree(gravity_check_device);\n}\n\nvoid launchDynamicsTestKernel(const CartpoleDynamics& CP, float* state_cpu, float* control_cpu, float* state_der_cpu)\n{\n  // Allocate memory on the CPU for checking the mass\n  float* state_gpu;\n  float* control_gpu;\n  float* state_der_gpu;\n\n  HANDLE_ERROR(cudaMalloc((void**)&state_gpu, sizeof(float) * CP.STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&control_gpu, sizeof(float) * CP.CONTROL_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&state_der_gpu, sizeof(float) * CP.STATE_DIM));\n\n  HANDLE_ERROR(cudaMemcpy(state_gpu, state_cpu, sizeof(float) * CP.STATE_DIM, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(control_gpu, control_cpu, sizeof(float) * CP.CONTROL_DIM, cudaMemcpyHostToDevice));\n\n  DynamicsTestKernel<<<1, 1>>>(CP.model_d_, state_gpu, control_gpu, state_der_gpu);\n  CudaCheckError();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(state_der_cpu, state_der_gpu, sizeof(float) * CP.STATE_DIM, cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(state_gpu);\n  cudaFree(control_gpu);\n  cudaFree(state_der_gpu);\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/dynamics/cartpole/cartpole_dynamics_kernel_test.cuh",
    "content": "#pragma once\n\n#ifndef KERNEL_TESTS_DYNAMICS_CARTPOLE_CARTPOLE_KERNEL_TEST_CUH_\n#define KERNEL_TESTS_DYNAMICS_CARTPOLE_CARTPOLE_KERNEL_TEST_CUH_\n\n#include <mppi/dynamics/cartpole/cartpole_dynamics.cuh>\n\n__global__ void CartMassTestKernel(CartpoleDynamics* CP, float& mass_check);\n__global__ void PoleMassTestKernel(CartpoleDynamics* CP, float& mass_check);\n__global__ void PoleLengthTestKernel(CartpoleDynamics* CP, float& length_check);\n__global__ void GravityTestKernel(CartpoleDynamics* CP, float& gravity_check);\n__global__ void DynamicsTestKernel(CartpoleDynamics* CP, float* state, float* control, float* state_der);\n\nvoid launchCartMassTestKernel(const CartpoleDynamics&, float& mass_check);\nvoid launchPoleMassTestKernel(const CartpoleDynamics&, float& mass_check);\nvoid launchPoleLengthTestKernel(const CartpoleDynamics&, float& length_check);\nvoid launchGravityTestKernel(const CartpoleDynamics&, float& gravity_check);\nvoid launchDynamicsTestKernel(const CartpoleDynamics&, float* state_cpu, float* control_cpu, float* state_der_cpu);\n\n#ifdef __CUDACC__\n#include \"cartpole_dynamics_kernel_test.cu\"\n#endif\n\n#endif  //! KERNEL_TESTS_DYNAMICS_CARTPOLE_CARTPOLE_KERNEL_TEST_CUH_\n"
  },
  {
    "path": "tests/include/kernel_tests/dynamics/dynamics_generic_kernel_tests.cu",
    "content": "#include <mppi/core/mppi_common.cuh>\n\ntemplate <typename CLASS_T, typename PARAMS_T>\n__global__ void parameterTestKernel(CLASS_T* class_t, PARAMS_T& params)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid == 0)\n  {\n    params = class_t->getParams();\n  }\n}\n\ntemplate <typename CLASS_T, typename PARAMS_T>\nvoid launchParameterTestKernel(CLASS_T& class_t, PARAMS_T& params)\n{\n  PARAMS_T* params_d;\n  HANDLE_ERROR(cudaMalloc((void**)&params_d, sizeof(PARAMS_T)))\n\n  parameterTestKernel<CLASS_T, PARAMS_T><<<1, 1>>>(static_cast<CLASS_T*>(class_t.model_d_), *params_d);\n  HANDLE_ERROR(cudaGetLastError());\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(&params, params_d, sizeof(PARAMS_T), cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(params_d);\n}\n\ntemplate <class DYN_T>\n__global__ void getSharedMemorySizesKernel(DYN_T* __restrict__ dynamics, int* __restrict__ output_d)\n{\n  output_d[0] = dynamics->getGrdSharedSizeBytes();\n  output_d[1] = dynamics->getBlkSharedSizeBytes();\n}\n\ntemplate <typename DYNAMICS_T>\nvoid launchGetSharedMemorySizesKernel(DYNAMICS_T& dynamics, int shared_mem_sizes[2])\n{\n  int* shared_mem_sizes_d;\n  HANDLE_ERROR(cudaMalloc((void**)&shared_mem_sizes_d, sizeof(int) * 2));\n\n  getSharedMemorySizesKernel<DYNAMICS_T><<<1, 1>>>(dynamics.model_d_, shared_mem_sizes_d);\n  HANDLE_ERROR(cudaGetLastError());\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(shared_mem_sizes, shared_mem_sizes_d, sizeof(int) * 2, cudaMemcpyDeviceToHost));\n\n  HANDLE_ERROR(cudaFree(shared_mem_sizes_d));\n}\n\ntemplate <typename DYNAMICS_T, int C_DIM>\n__global__ void controlRangesTestKernel(DYNAMICS_T* dynamics, float2* control_rngs)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid == 0)\n  {\n    float2* raw_ptr = dynamics->getControlRangesRaw();\n    for (int i = 0; i < C_DIM; i++)\n    {\n      control_rngs[i].x = raw_ptr[i].x;\n      
control_rngs[i].y = raw_ptr[i].y;\n    }\n  }\n}\n\ntemplate <typename DYNAMICS_T, int C_DIM>\nvoid launchControlRangesTestKernel(DYNAMICS_T& dynamics, std::array<float2, C_DIM>& control_rngs)\n{\n  float2* ranges_d;\n  HANDLE_ERROR(cudaMalloc((void**)&ranges_d, sizeof(float2) * control_rngs.size()))\n\n  controlRangesTestKernel<DYNAMICS_T, C_DIM><<<1, 1>>>(static_cast<DYNAMICS_T*>(dynamics.model_d_), ranges_d);\n  HANDLE_ERROR(cudaGetLastError());\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(control_rngs.data(), ranges_d, sizeof(float2) * control_rngs.size(), cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(ranges_d);\n}\n\ntemplate <typename DYNAMICS_T, int S_DIM, int C_DIM>\n__global__ void enforceConstraintTestKernel(DYNAMICS_T* dynamics, float* state, float* control, int num)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num)\n  {\n    dynamics->enforceConstraints(&state[tid * S_DIM], &control[tid * C_DIM]);\n  }\n}\n\ntemplate <typename DYNAMICS_T, int S_DIM, int C_DIM>\nvoid launchEnforceConstraintTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, S_DIM>>& state,\n                                       std::vector<std::array<float, C_DIM>>& control, int dim_y)\n{\n  int count = state.size();\n  float* state_d;\n  float* control_d;\n  HANDLE_ERROR(cudaMalloc((void**)&state_d, sizeof(float) * S_DIM * state.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&control_d, sizeof(float) * C_DIM * control.size()))\n\n  HANDLE_ERROR(cudaMemcpy(state_d, state.data(), sizeof(float) * S_DIM * count, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(control_d, control.data(), sizeof(float) * C_DIM * count, cudaMemcpyHostToDevice));\n\n  dim3 threadsPerBlock(count, dim_y);\n  dim3 numBlocks(1, 1);\n  enforceConstraintTestKernel<DYNAMICS_T, S_DIM, C_DIM>\n      <<<numBlocks, threadsPerBlock>>>(static_cast<DYNAMICS_T*>(dynamics.model_d_), state_d, control_d, count);\n  
HANDLE_ERROR(cudaGetLastError());\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(state.data(), state_d, sizeof(float) * S_DIM * state.size(), cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(control.data(), control_d, sizeof(float) * C_DIM * control.size(), cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(state_d);\n  cudaFree(control_d);\n}\n\ntemplate <typename DYNAMICS_T, int S_DIM>\n__global__ void updateStateTestKernel(DYNAMICS_T* dynamics, float* state, float* state_der, float dt, int num)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num)\n  {\n    dynamics->updateState(state + (tid * S_DIM), state_der + (tid * S_DIM), dt);\n  }\n}\n\ntemplate <typename DYNAMICS_T, int S_DIM>\nvoid launchUpdateStateTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, S_DIM>>& state,\n                                 std::vector<std::array<float, S_DIM>>& state_der, float dt, int dim_y)\n{\n  int count = state.size();\n  float* state_d;\n  float* state_der_d;\n  HANDLE_ERROR(cudaMalloc((void**)&state_d, sizeof(float) * S_DIM * count))\n  HANDLE_ERROR(cudaMalloc((void**)&state_der_d, sizeof(float) * S_DIM * count))\n\n  HANDLE_ERROR(cudaMemcpy(state_d, state.data(), sizeof(float) * S_DIM * count, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(state_der_d, state_der.data(), sizeof(float) * S_DIM * count, cudaMemcpyHostToDevice));\n\n  dim3 threadsPerBlock(count, dim_y);\n  dim3 numBlocks(1, 1);\n  updateStateTestKernel<DYNAMICS_T, S_DIM>\n      <<<numBlocks, threadsPerBlock>>>(static_cast<DYNAMICS_T*>(dynamics.model_d_), state_d, state_der_d, dt, count);\n  HANDLE_ERROR(cudaGetLastError());\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(state.data(), state_d, sizeof(float) * S_DIM * state.size(), cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(\n      cudaMemcpy(state_der.data(), state_der_d, sizeof(float) * S_DIM * state_der.size(), cudaMemcpyDeviceToHost));\n  
cudaDeviceSynchronize();\n\n  cudaFree(state_d);\n  cudaFree(state_der_d);\n}\n\ntemplate <typename DYNAMICS_T, int S_DIM>\n__global__ void computeKinematicsTestKernel(DYNAMICS_T* dynamics, float* state, float* state_der, int num)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num && threadIdx.y == 0)\n  {\n    dynamics->computeKinematics(state + (tid * S_DIM), state_der + (tid * S_DIM));\n  }\n}\n\ntemplate <typename DYNAMICS_T, int S_DIM>\nvoid launchComputeKinematicsTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, S_DIM>>& state,\n                                       std::vector<std::array<float, S_DIM>>& state_der, int dim_y)\n{\n  int count = state.size();\n  float* state_d;\n  float* state_der_d;\n  HANDLE_ERROR(cudaMalloc((void**)&state_d, sizeof(float) * S_DIM * state.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&state_der_d, sizeof(float) * S_DIM * state_der.size()))\n\n  HANDLE_ERROR(cudaMemcpy(state_d, state.data(), sizeof(float) * S_DIM * count, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(state_der_d, state_der.data(), sizeof(float) * S_DIM * count, cudaMemcpyHostToDevice));\n\n  dim3 threadsPerBlock(count, dim_y);\n  dim3 numBlocks(1, 1);\n  computeKinematicsTestKernel<DYNAMICS_T, S_DIM>\n      <<<numBlocks, threadsPerBlock>>>(static_cast<DYNAMICS_T*>(dynamics.model_d_), state_d, state_der_d, count);\n  HANDLE_ERROR(cudaGetLastError());\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(state.data(), state_d, sizeof(float) * S_DIM * state.size(), cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(\n      cudaMemcpy(state_der.data(), state_der_d, sizeof(float) * S_DIM * state_der.size(), cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(state_d);\n  cudaFree(state_der_d);\n}\n\ntemplate <class DYNAMICS_T, int S_DIM, int C_DIM, int BLOCKDIM_X>\n__global__ void computeDynamicsTestKernel(DYNAMICS_T* model, float* state, float* control, float* state_der, int count)\n{\n  extern 
__shared__ float entire_buffer[];\n\n  float* output = entire_buffer;\n  float* theta = &output[mppi::math::nearest_multiple_4(blockDim.x * DYNAMICS_T::OUTPUT_DIM)];\n\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n\n  model->initializeDynamics(state, control, output, theta, 0.0f, 0.0f);\n  __syncthreads();\n\n  if (tid < count)\n  {\n    model->computeDynamics(state + (tid * S_DIM), control + (tid * C_DIM), state_der + (tid * S_DIM), theta);\n  }\n}\n\ntemplate <class DYNAMICS_T, int S_DIM, int C_DIM, int BLOCKDIM_X>\nvoid launchComputeDynamicsTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, S_DIM>>& state,\n                                     std::vector<std::array<float, C_DIM>>& control,\n                                     std::vector<std::array<float, S_DIM>>& state_der, int dim_y)\n{\n  int count = state.size();\n  float* state_d;\n  float* state_der_d;\n  float* control_d;\n\n  HANDLE_ERROR(cudaMalloc((void**)&state_d, sizeof(float) * S_DIM * count))\n  HANDLE_ERROR(cudaMalloc((void**)&state_der_d, sizeof(float) * S_DIM * count))\n  HANDLE_ERROR(cudaMalloc((void**)&control_d, sizeof(float) * C_DIM * count))\n\n  HANDLE_ERROR(cudaMemcpy(state_d, state.data(), sizeof(float) * S_DIM * count, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(state_der_d, state_der.data(), sizeof(float) * S_DIM * count, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(control_d, control.data(), sizeof(float) * C_DIM * count, cudaMemcpyHostToDevice));\n\n  // make sure you cannot use invalid inputs\n  const int gridsize_x = (count - 1) / BLOCKDIM_X + 1;\n  dim3 threadsPerBlock(BLOCKDIM_X, dim_y);\n  dim3 numBlocks(gridsize_x, 1);\n  unsigned shared_mem = mppi::kernels::calcClassSharedMemSize(&dynamics, threadsPerBlock) +\n                        mppi::math::nearest_multiple_4(threadsPerBlock.x * DYNAMICS_T::OUTPUT_DIM);\n  // launch kernel\n  computeDynamicsTestKernel<DYNAMICS_T, S_DIM, C_DIM, BLOCKDIM_X>\n      <<<numBlocks, threadsPerBlock, 
shared_mem>>>(dynamics.model_d_, state_d, control_d, state_der_d, count);\n  HANDLE_ERROR(cudaGetLastError());\n\n  HANDLE_ERROR(cudaMemcpy(state.data(), state_d, sizeof(float) * S_DIM * count, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(state_der.data(), state_der_d, sizeof(float) * S_DIM * count, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(control.data(), control_d, sizeof(float) * C_DIM * count, cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(state_d);\n  cudaFree(state_der_d);\n  cudaFree(control_d);\n}\n\ntemplate <typename DYNAMICS_T, int S_DIM, int C_DIM, int BLOCKDIM_X>\n__global__ void computeStateDerivTestKernel(DYNAMICS_T* dynamics, float* state, float* control, float* state_der,\n                                            int num)\n{\n  extern __shared__ float entire_buffer[];\n\n  float* output = entire_buffer;\n  float* theta = &output[mppi::math::nearest_multiple_4(blockDim.x * DYNAMICS_T::OUTPUT_DIM)];\n\n  dynamics->initializeDynamics(state, control, output, theta, 0.0f, 0.0f);\n  __syncthreads();\n\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num)\n  {\n    dynamics->computeStateDeriv(state + (tid * S_DIM), control + (tid * C_DIM), state_der + (tid * S_DIM), theta);\n  }\n}\n\ntemplate <typename DYNAMICS_T, int S_DIM, int C_DIM, int BLOCKDIM_X>\nvoid launchComputeStateDerivTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, S_DIM>>& state,\n                                       std::vector<std::array<float, C_DIM>>& control,\n                                       std::vector<std::array<float, S_DIM>>& state_der, int dim_y)\n{\n  int count = state.size();\n  float* state_d;\n  float* control_d;\n  float* state_der_d;\n  HANDLE_ERROR(cudaMalloc((void**)&state_d, sizeof(float) * S_DIM * state.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&state_der_d, sizeof(float) * S_DIM * state_der.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&control_d, sizeof(float) * C_DIM * 
control.size()))\n\n  HANDLE_ERROR(cudaMemcpy(state_d, state.data(), sizeof(float) * S_DIM * count, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(state_der_d, state_der.data(), sizeof(float) * S_DIM * count, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(control_d, control.data(), sizeof(float) * C_DIM * count, cudaMemcpyHostToDevice));\n\n  const int gridsize_x = (count - 1) / BLOCKDIM_X + 1;\n  dim3 threadsPerBlock(BLOCKDIM_X, dim_y);\n  dim3 numBlocks(gridsize_x, 1);\n\n  unsigned shared_mem = mppi::kernels::calcClassSharedMemSize(&dynamics, threadsPerBlock) +\n                        mppi::math::nearest_multiple_4(threadsPerBlock.x * DYNAMICS_T::OUTPUT_DIM);\n  computeStateDerivTestKernel<DYNAMICS_T, S_DIM, C_DIM, BLOCKDIM_X><<<numBlocks, threadsPerBlock, shared_mem>>>(\n      static_cast<DYNAMICS_T*>(dynamics.model_d_), state_d, control_d, state_der_d, count);\n  HANDLE_ERROR(cudaGetLastError());\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(state.data(), state_d, sizeof(float) * S_DIM * state.size(), cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(\n      cudaMemcpy(state_der.data(), state_der_d, sizeof(float) * S_DIM * state_der.size(), cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(control.data(), control_d, sizeof(float) * C_DIM * control.size(), cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(state_d);\n  cudaFree(state_der_d);\n  cudaFree(control_d);\n}\n\ntemplate <typename DYNAMICS_T>\n__global__ void stepTestKernel(DYNAMICS_T* dynamics, float* state, float* control, float* state_der, float* next_state,\n                               float* output, int t, float dt, int num)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n\n  extern __shared__ float entire_buffer[];\n  float* theta = entire_buffer;\n\n  // float* theta = reinterpret_cast<float*>(theta_s4);\n  float* x = state + (tid * DYNAMICS_T::STATE_DIM);\n  float* x_dot = state_der + (tid * DYNAMICS_T::STATE_DIM);\n  float* x_next = 
next_state + (tid * DYNAMICS_T::STATE_DIM);\n  float* u = control + (tid * DYNAMICS_T::CONTROL_DIM);\n  float* y = output + (tid * DYNAMICS_T::OUTPUT_DIM);\n\n  if (tid < num)\n  {\n    dynamics->initializeDynamics(state, control, output, theta, 0.0f, dt);\n  }\n  __syncthreads();\n\n  if (tid < num)\n  {\n    dynamics->enforceConstraints(x, u);\n    dynamics->step(x, x_next, x_dot, u, y, theta, t, dt);\n  }\n}\n\ntemplate <typename DYNAMICS_T, int BLOCKDIM_X = 32>  // here for compatability\nvoid launchStepTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, DYNAMICS_T::STATE_DIM>>& state,\n                          std::vector<std::array<float, DYNAMICS_T::CONTROL_DIM>>& control,\n                          std::vector<std::array<float, DYNAMICS_T::STATE_DIM>>& state_der,\n                          std::vector<std::array<float, DYNAMICS_T::STATE_DIM>>& next_state, int t, float dt, int dim_y,\n                          int dim_x = 32)\n{\n  std::vector<std::array<float, DYNAMICS_T::OUTPUT_DIM>> output(state.size());\n  launchStepTestKernel(dynamics, state, control, state_der, next_state, output, t, dt, dim_y, dim_x);\n}\n\ntemplate <typename DYNAMICS_T>\nvoid launchStepTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, DYNAMICS_T::STATE_DIM>>& state,\n                          std::vector<std::array<float, DYNAMICS_T::CONTROL_DIM>>& control,\n                          std::vector<std::array<float, DYNAMICS_T::STATE_DIM>>& state_der,\n                          std::vector<std::array<float, DYNAMICS_T::STATE_DIM>>& next_state,\n                          std::vector<std::array<float, DYNAMICS_T::OUTPUT_DIM>>& output, int t, float dt, int dim_y,\n                          int dim_x)\n{\n  if (state.size() != control.size())\n  {\n    std::cerr << \"Num States doesn't match num controls\" << std::endl;\n    return;\n  }\n  if (state.size() != state_der.size())\n  {\n    std::cerr << \"Num States doesn't match num state_ders\" << std::endl;\n    
return;\n  }\n  if (state.size() != next_state.size())\n  {\n    std::cerr << \"Num States doesn't match num next_states\" << std::endl;\n    return;\n  }\n  if (state.size() % dim_x != 0)\n  {\n    std::cerr << \"Num States needs be evenly divisible by dim_x: \" << state.size()\n              << \" state queries, \" << dim_x << \" parallelizable threads per block\" << std::endl;\n    return;\n  }\n  int count = state.size();\n  float* state_d;\n  float* control_d;\n  float* state_der_d;\n  float* next_state_d;\n  float* output_d;\n  HANDLE_ERROR(cudaMalloc((void**)&state_d, sizeof(float) * DYNAMICS_T::STATE_DIM * count));\n  HANDLE_ERROR(cudaMalloc((void**)&state_der_d, sizeof(float) * DYNAMICS_T::STATE_DIM * count));\n  HANDLE_ERROR(cudaMalloc((void**)&next_state_d, sizeof(float) * DYNAMICS_T::STATE_DIM * count));\n  HANDLE_ERROR(cudaMalloc((void**)&control_d, sizeof(float) * DYNAMICS_T::CONTROL_DIM * count));\n  HANDLE_ERROR(cudaMalloc((void**)&output_d, sizeof(float) * DYNAMICS_T::OUTPUT_DIM * count));\n\n  HANDLE_ERROR(\n      cudaMemcpy(state_d, state.data(), sizeof(float) * DYNAMICS_T::STATE_DIM * count, cudaMemcpyHostToDevice));\n  // HANDLE_ERROR(\n  //     cudaMemcpy(state_der_d, state_der.data(), sizeof(float) * DYNAMICS_T::STATE_DIM * count,\n  //     cudaMemcpyHostToDevice));\n  // HANDLE_ERROR(cudaMemcpy(next_state_d, next_state.data(), sizeof(float) * DYNAMICS_T::STATE_DIM * count,\n  //                         cudaMemcpyHostToDevice));\n  HANDLE_ERROR(\n      cudaMemcpy(control_d, control.data(), sizeof(float) * DYNAMICS_T::CONTROL_DIM * count, cudaMemcpyHostToDevice));\n\n  const int gridsize_x = (count - 1) / dim_x + 1;\n  dim3 threadsPerBlock(dim_x, dim_y);\n  dim3 numBlocks(gridsize_x, 1);\n\n  unsigned shared_mem = mppi::kernels::calcClassSharedMemSize(&dynamics, threadsPerBlock);\n  stepTestKernel<DYNAMICS_T><<<numBlocks, threadsPerBlock, shared_mem>>>(\n      dynamics.model_d_, state_d, control_d, state_der_d, next_state_d, output_d, t, dt, 
count);\n  HANDLE_ERROR(cudaGetLastError());\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(\n      cudaMemcpy(state.data(), state_d, sizeof(float) * DYNAMICS_T::STATE_DIM * count, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(\n      cudaMemcpy(state_der.data(), state_der_d, sizeof(float) * DYNAMICS_T::STATE_DIM * count, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(next_state.data(), next_state_d, sizeof(float) * DYNAMICS_T::STATE_DIM * count,\n                          cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(\n      cudaMemcpy(control.data(), control_d, sizeof(float) * DYNAMICS_T::CONTROL_DIM * count, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(\n      cudaMemcpy(output.data(), output_d, sizeof(float) * DYNAMICS_T::OUTPUT_DIM * count, cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(state_d);\n  cudaFree(state_der_d);\n  cudaFree(control_d);\n  cudaFree(next_state_d);\n  cudaFree(output_d);\n}\n\ntemplate <class DYN_T>\nvoid checkGPUComputationStep(DYN_T& dynamics, float dt, int max_y_dim, int x_dim,\n                             typename DYN_T::buffer_trajectory buffer, double tol)\n{\n  CudaCheckError();\n  dynamics.GPUSetup();\n  CudaCheckError();\n\n  const int num_points = 1024;\n  Eigen::Matrix<float, DYN_T::CONTROL_DIM, num_points> control_trajectory;\n  control_trajectory = Eigen::Matrix<float, DYN_T::CONTROL_DIM, num_points>::Random();\n  Eigen::Matrix<float, DYN_T::STATE_DIM, num_points> state_trajectory;\n  state_trajectory = Eigen::Matrix<float, DYN_T::STATE_DIM, num_points>::Random();\n\n  std::vector<std::array<float, DYN_T::STATE_DIM>> s(num_points);\n  std::vector<std::array<float, DYN_T::STATE_DIM>> s_der(num_points);\n  std::vector<std::array<float, DYN_T::STATE_DIM>> s_next(num_points);\n  std::vector<std::array<float, DYN_T::OUTPUT_DIM>> output(num_points);\n  // steering, throttle\n  std::vector<std::array<float, DYN_T::CONTROL_DIM>> u(num_points);\n  for (int state_index = 0; state_index < s.size(); 
state_index++)\n  {\n    for (int dim = 0; dim < s[0].size(); dim++)\n    {\n      s[state_index][dim] = state_trajectory.col(state_index)(dim);\n    }\n    for (int dim = 0; dim < u[0].size(); dim++)\n    {\n      u[state_index][dim] = control_trajectory.col(state_index)(dim);\n    }\n  }\n\n  // Run dynamics on GPU\n  for (int y_dim = 1; y_dim <= max_y_dim; y_dim++)\n  {\n    if (dynamics.checkRequiresBuffer())\n    {\n      dynamics.updateFromBuffer(buffer);\n    }\n    launchStepTestKernel<DYN_T>(dynamics, s, u, s_der, s_next, output, 0, dt, y_dim, x_dim);\n    for (int point = 0; point < num_points; point++)\n    {\n      typename DYN_T::state_array state = state_trajectory.col(point);\n      typename DYN_T::state_array next_state = DYN_T::state_array::Zero();\n      typename DYN_T::control_array control = control_trajectory.col(point);\n      typename DYN_T::state_array state_der_cpu = DYN_T::state_array::Zero();\n      typename DYN_T::output_array output_array_cpu = DYN_T::output_array::Zero();\n      dynamics.initializeDynamics(state, control, output_array_cpu, 0, dt);\n\n      dynamics.step(state, next_state, state_der_cpu, control, output_array_cpu, 0.0f, dt);\n\n      for (int dim = 0; dim < DYN_T::STATE_DIM; dim++)\n      {\n        EXPECT_NEAR(state(dim), s[point][dim], tol)\n            << \"at sample \" << point << \", state dim: \" << dim << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(s[point][dim]));\n      }\n      for (int dim = 0; dim < DYN_T::CONTROL_DIM; dim++)\n      {\n        EXPECT_NEAR(control(dim), u[point][dim], tol)\n            << \"at sample \" << point << \", control dim: \" << dim << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(u[point][dim]));\n      }\n      for (int dim = 0; dim < DYN_T::STATE_DIM; dim++)\n      {\n        EXPECT_NEAR(state_der_cpu(dim), s_der[point][dim], tol)\n            << \"at sample \" << point << \", state deriv dim: \" << dim << \" with y_dim \" << y_dim;\n        
EXPECT_TRUE(isfinite(s_der[point][dim]));\n      }\n      for (int dim = 0; dim < DYN_T::STATE_DIM; dim++)\n      {\n        EXPECT_NEAR(next_state(dim), s_next[point][dim], tol)\n            << \"at sample \" << point << \", next state dim: \" << dim << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(s[point][dim]));\n      }\n      for (int dim = 0; dim < DYN_T::OUTPUT_DIM; dim++)\n      {\n        if (isnan(output_array_cpu(dim)) && isnan(output[point][dim]))\n        {\n          continue;\n        }\n        EXPECT_NEAR(output_array_cpu(dim), output[point][dim], tol * 1000)  // TODO this is a stupid hack\n            << \"at sample \" << point << \", output dim: \" << dim << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(s_der[point][dim]));\n      }\n    }\n  }\n\n  dynamics.freeCudaMem();\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/dynamics/dynamics_generic_kernel_tests.cuh",
    "content": "#ifndef DYNAMICS_GENERIC_KERNEL_TESTS_CUH_\n#define DYNAMICS_GENERIC_KERNEL_TESTS_CUH_\n\n#include <array>\n#include <vector>\n#include <mppi/utils/math_utils.h>\n\ntemplate <typename CLASS_T, typename PARAMS_T>\n__global__ void parameterTestKernel(CLASS_T* class_t, PARAMS_T& params);\ntemplate <typename CLASS_T, typename PARAMS_T>\nvoid launchParameterTestKernel(CLASS_T& class_t, PARAMS_T& params);\n\ntemplate <typename DYNAMICS_T, int C_DIM>\n__global__ void controlRangesTestKernel(DYNAMICS_T* dynamics, float2* control_rngs);\ntemplate <typename DYNAMICS_T, int C_DIM>\nvoid launchControlRangesTestKernel(DYNAMICS_T& dynamics, std::array<float2, C_DIM>& control_rngs);\n\ntemplate <typename DYNAMICS_T, int S_DIM, int C_DIM>\n__global__ void enforceConstraintTestKernel(DYNAMICS_T* dynamics, float* state, float* control, int num);\ntemplate <typename DYNAMICS_T, int S_DIM, int C_DIM>\nvoid launchEnforceConstraintTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, S_DIM>>& state,\n                                       std::vector<std::array<float, C_DIM>>& control, int dim_y);\n\ntemplate <class DYNAMICS_T, int S_DIM>\n__global__ void computeKinematicsTestKernel(DYNAMICS_T* model, float* state, float* state_der, int num);\ntemplate <class DYNAMICS_T, int S_DIM>\nvoid launchComputeKinematicsTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, S_DIM>>& state,\n                                       std::vector<std::array<float, S_DIM>>& state_der, int dim_y);\n\ntemplate <class DYNAMICS_T, int S_DIM, int C_DIM, int BLOCKDIM_X>\n__global__ void computeDynamicsTestKernel(DYNAMICS_T* model, float* state, float* control, float* state_der, int count);\ntemplate <class DYNAMICS_T, int S_DIM, int C_DIM, int BLOCKDIM_X = 32>\nvoid launchComputeDynamicsTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, S_DIM>>& state,\n                                     std::vector<std::array<float, C_DIM>>& control,\n                            
         std::vector<std::array<float, S_DIM>>& state_der, int dim_y);\n\ntemplate <typename DYNAMICS_T, int S_DIM>\n__global__ void updateStateTestKernel(DYNAMICS_T* dynamics, float* state, float* state_der, float dt, int num);\ntemplate <typename DYNAMICS_T, int S_DIM>\nvoid launchUpdateStateTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, S_DIM>>& state,\n                                 std::vector<std::array<float, S_DIM>>& state_der, float dt, int dim_y);\n\ntemplate <typename DYNAMICS_T, int S_DIM, int C_DIM, int BLOCKDIM_X>\n__global__ void computeStateDerivTestKernel(DYNAMICS_T* dynamics, float* state, float* control, float* state_der,\n                                            int num);\ntemplate <typename DYNAMICS_T, int S_DIM, int C_DIM, int BLOCKDIM_X = 32>\nvoid launchComputeStateDerivTestKernel(DYNAMICS_T& dynamics, std::vector<std::array<float, S_DIM>>& state,\n                                       std::vector<std::array<float, C_DIM>>& control,\n                                       std::vector<std::array<float, S_DIM>>& state_der, int dim_y);\n\ntemplate <class DYN_T>\n__global__ void getSharedMemorySizesKernel(DYN_T* __restrict__ dynamics, int* __restrict__ output_d);\ntemplate <typename DYN_T>\nvoid launchGetSharedMemorySizesKernel(DYN_T& dynamics, int shared_mem_sizes[2]);\n\ntemplate <class DYN_T>\nvoid checkGPUComputationStep(DYN_T& dynamics, float dt, int max_y_dim, int x_dim,\n                             typename DYN_T::buffer_trajectory buffer, double tol = 1.0e-5);\n\n#if __CUDACC__\n#include \"dynamics_generic_kernel_tests.cu\"\n#endif\n\n#endif  // DYNAMICS_GENERIC_KERNEL_TESTS_CUH_\n"
  },
  {
    "path": "tests/include/kernel_tests/shaping_functions/shaping_function_kernels_tests.cu",
    "content": "#include \"shaping_function_kernels_tests.cuh\"\n\ntemplate <class CLASS_T, int NUM_ROLLOUTS, int BDIM_X>\nvoid launchShapingFunction_KernelTest(std::array<float, NUM_ROLLOUTS>& trajectory_costs_host, CLASS_T& shape_function,\n                                      float baseline, std::array<float, NUM_ROLLOUTS>& normalized_compute)\n{\n  // Allocate CUDA memory\n  float* trajectory_costs_d;\n  HANDLE_ERROR(cudaMalloc((void**)&trajectory_costs_d, sizeof(float) * trajectory_costs_host.size()))\n\n  HANDLE_ERROR(cudaMemcpy(trajectory_costs_d, trajectory_costs_host.data(),\n                          sizeof(float) * trajectory_costs_host.size(), cudaMemcpyHostToDevice))\n\n  mppi_common::weightKernel<CLASS_T, NUM_ROLLOUTS>\n      <<<1, NUM_ROLLOUTS>>>(trajectory_costs_d, baseline, shape_function.shaping_function_d_);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(normalized_compute.data(), trajectory_costs_d, sizeof(float) * trajectory_costs_host.size(),\n                          cudaMemcpyDeviceToHost))\n\n  cudaFree(trajectory_costs_d);\n}\n\ntemplate <class CLASS_T, int NUM_ROLLOUTS>\nvoid launchShapingFunction_KernelTest(typename CLASS_T::cost_traj& trajectory_costs_host, CLASS_T& shape_function,\n                                      float baseline, std::array<float, NUM_ROLLOUTS>& normalized_compute,\n                                      cudaStream_t stream)\n{\n  // Allocate CUDA memory\n  float* trajectory_costs_d;\n  HANDLE_ERROR(cudaMalloc((void**)&trajectory_costs_d, sizeof(float) * trajectory_costs_host.size()))\n\n  HANDLE_ERROR(cudaMemcpy(trajectory_costs_d, trajectory_costs_host.data(),\n                          sizeof(float) * trajectory_costs_host.size(), cudaMemcpyHostToDevice))\n\n  shape_function.launchWeightKernel(trajectory_costs_d, baseline, stream);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(normalized_compute.data(), trajectory_costs_d, sizeof(float) * trajectory_costs_host.size(),\n                          
cudaMemcpyDeviceToHost))\n\n  cudaFree(trajectory_costs_d);\n}\n"
  },
  {
    "path": "tests/include/kernel_tests/shaping_functions/shaping_function_kernels_tests.cuh",
    "content": "#ifndef MPPIGENERIC_SHAPING_FUNCTION_KERNEL_TEST_CUH\n#define MPPIGENERIC_SHAPING_FUNCTION_KERNEL_TEST_CUH\n\n#include <mppi/core/mppi_common.cuh>\n#include <mppi/utils/managed.cuh>\n\ntemplate <class CLASS_T, int NUM_ROLLOUTS>\nvoid launchShapingFunction_KernelTest(typename CLASS_T::cost_traj& trajectory_costs_host, CLASS_T& shape_function,\n                                      float baseline, std::array<float, NUM_ROLLOUTS>& normalized_compute,\n                                      cudaStream_t stream = nullptr);\n\ntemplate <class CLASS_T, int NUM_ROLLOUTS, int BDIM_X>\nvoid launchShapingFunction_KernelTest(std::array<float, NUM_ROLLOUTS>& trajectory_costs_host, CLASS_T& shape_function,\n                                      float baseline, std::array<float, NUM_ROLLOUTS>& normalized_compute);\n\n#if __CUDACC__\n#include \"shaping_function_kernels_tests.cu\"\n#endif\n\n#endif  // MPPIGENERIC_SHAPING_FUNCTION_KERNEL_TEST_CUH\n"
  },
  {
    "path": "tests/include/kernel_tests/utils/network_helper_kernel_test.cuh",
    "content": "//\n// Created by jason on 8/19/22.\n//\n\n#ifndef MPPIGENERIC_NETWORK_HELPER_KERNEL_TEST_CUH\n#define MPPIGENERIC_NETWORK_HELPER_KERNEL_TEST_CUH\n\n#include <mppi/utils/math_utils.h>\n#include <mppi/core/mppi_common.cuh>\n\n// TODO check on multiple different blocks\n\ntemplate <class NETWORK_T>\n__global__ void parameterCheckTestKernel(NETWORK_T* model, float* theta, int* stride, int* net_structure,\n                                         float* shared_theta, int* shared_stride, int* shared_net_structure)\n{\n  extern __shared__ float theta_s[];\n  model->initialize(theta_s);\n\n  float* theta_shared_ptr = theta_s;\n  int* stride_shared_ptr = (int*)(theta_s + model->getNumParams());\n  int* structure_shared_ptr = (int*)(theta_s + model->getNumParams() + model->getStrideSize());\n\n  for (int i = 0; i < model->getNumParams(); i++)\n  {\n    theta[i] = model->getThetaPtr()[i];\n    shared_theta[i] = theta_shared_ptr[i];\n  }\n  for (int i = 0; i < model->getStrideSize(); i++)\n  {\n    stride[i] = model->getStrideIdcsPtr()[i];\n    shared_stride[i] = stride_shared_ptr[i];\n  }\n  for (int i = 0; i < model->getNumLayers(); i++)\n  {\n    net_structure[i] = model->getNetStructurePtr()[i];\n    shared_net_structure[i] = structure_shared_ptr[i];\n  }\n}\n\ntemplate <class NETWORK_T, int THETA_SIZE, int STRIDE_SIZE, int NUM_LAYERS>\nvoid launchParameterCheckTestKernel(NETWORK_T& model, std::array<float, THETA_SIZE>& theta,\n                                    std::array<int, STRIDE_SIZE>& stride, std::array<int, NUM_LAYERS>& net_structure,\n                                    std::array<float, THETA_SIZE>& shared_theta,\n                                    std::array<int, STRIDE_SIZE>& shared_stride,\n                                    std::array<int, NUM_LAYERS>& shared_net_structure)\n{\n  float* theta_d;\n  int* stride_d;\n  int* net_structure_d;\n\n  float* shared_theta_d;\n  int* shared_stride_d;\n  int* shared_net_structure_d;\n\n  
HANDLE_ERROR(cudaMalloc((void**)&theta_d, sizeof(float) * theta.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&stride_d, sizeof(int) * stride.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&net_structure_d, sizeof(int) * net_structure.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&shared_theta_d, sizeof(float) * theta.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&shared_stride_d, sizeof(int) * stride.size()))\n  HANDLE_ERROR(cudaMalloc((void**)&shared_net_structure_d, sizeof(int) * net_structure.size()))\n\n  dim3 threadsPerBlock(3, 1);\n  dim3 numBlocks(10, 1);\n  unsigned shared_size = mppi::kernels::calcClassSharedMemSize(&model, threadsPerBlock);\n  parameterCheckTestKernel<NETWORK_T><<<numBlocks, threadsPerBlock, shared_size>>>(\n      model.network_d_, theta_d, stride_d, net_structure_d, shared_theta_d, shared_stride_d, shared_net_structure_d);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(theta.data(), theta_d, sizeof(float) * theta.size(), cudaMemcpyDeviceToHost))\n  HANDLE_ERROR(cudaMemcpy(stride.data(), stride_d, sizeof(int) * stride.size(), cudaMemcpyDeviceToHost))\n  HANDLE_ERROR(\n      cudaMemcpy(net_structure.data(), net_structure_d, sizeof(int) * net_structure.size(), cudaMemcpyDeviceToHost))\n  HANDLE_ERROR(cudaMemcpy(shared_theta.data(), shared_theta_d, sizeof(float) * theta.size(), cudaMemcpyDeviceToHost))\n  HANDLE_ERROR(cudaMemcpy(shared_stride.data(), shared_stride_d, sizeof(int) * stride.size(), cudaMemcpyDeviceToHost))\n  HANDLE_ERROR(cudaMemcpy(shared_net_structure.data(), shared_net_structure_d, sizeof(int) * net_structure.size(),\n                          cudaMemcpyDeviceToHost))\n  cudaDeviceSynchronize();\n\n  cudaFree(theta_d);\n  cudaFree(stride_d);\n  cudaFree(net_structure_d);\n  cudaFree(shared_theta_d);\n  cudaFree(shared_stride_d);\n  cudaFree(shared_net_structure_d);\n}\n\ntemplate <class NETWORK_T>\n__global__ void parameterCheckTestKernel(NETWORK_T* model, float* lstm_params, float* shared_lstm_params,\n                           
              float* fnn_params, float* shared_fnn_params)\n{\n  extern __shared__ float theta_s[];\n  model->initialize(theta_s);\n  uint tid = blockIdx.x;\n\n  float* lstm_params_start =\n      lstm_params + tid * (model->getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * model->getHiddenDim());\n  memcpy(lstm_params_start, model->getWeights(),\n         model->getLSTMGrdSharedSizeBytes() + 2 * model->getHiddenDim() * sizeof(float));\n\n  float* fnn_params_start = fnn_params + tid * model->getOutputGrdSharedSizeBytes() / sizeof(float);\n  memcpy(fnn_params_start, model->getOutputWeights(), model->getOutputGrdSharedSizeBytes());\n\n  float* shared_lstm_params_start =\n      shared_lstm_params + tid * (model->getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * model->getHiddenDim());\n  memcpy(shared_lstm_params_start, theta_s, model->getLSTMGrdSharedSizeBytes());\n  memcpy(shared_lstm_params_start + model->getLSTMGrdSharedSizeBytes() / sizeof(float),\n         theta_s + model->getGrdSharedSizeBytes() / sizeof(float), model->getHiddenDim() * 2 * sizeof(float));\n\n  float* shared_fnn_params_start = shared_fnn_params + tid * model->getOutputGrdSharedSizeBytes() / sizeof(float);\n  memcpy(shared_fnn_params_start, theta_s + model->getLSTMGrdSharedSizeBytes() / sizeof(float),\n         model->getOutputGrdSharedSizeBytes());\n}\n\ntemplate <class NETWORK_T>\nvoid launchParameterCheckTestKernel(NETWORK_T& model, std::vector<float>& lstm_params,\n                                    std::vector<float>& shared_lstm_params, std::vector<float>& fnn_params,\n                                    std::vector<float>& shared_fnn_params, int num)\n{\n  lstm_params.resize((model.getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * model.getHiddenDim()) * num);\n  shared_lstm_params.resize((model.getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * model.getHiddenDim()) * num);\n  fnn_params.resize(model.getOutputGrdSharedSizeBytes() / sizeof(float) * num);\n  
shared_fnn_params.resize(model.getOutputGrdSharedSizeBytes() / sizeof(float) * num);\n\n  std::fill(lstm_params.begin(), lstm_params.end(), -1);\n  std::fill(shared_lstm_params.begin(), shared_lstm_params.end(), -1);\n  std::fill(fnn_params.begin(), fnn_params.end(), -2);\n  std::fill(shared_fnn_params.begin(), shared_fnn_params.end(), -2);\n\n  float* lstm_params_d = nullptr;\n  float* shared_lstm_params_d = nullptr;\n  float* fnn_params_d = nullptr;\n  float* shared_fnn_params_d = nullptr;\n\n  HANDLE_ERROR(cudaMalloc((void**)&lstm_params_d,\n                          (model.getLSTMGrdSharedSizeBytes() + 2 * model.getHiddenDim() * sizeof(float)) * num));\n  HANDLE_ERROR(cudaMalloc((void**)&shared_lstm_params_d,\n                          (model.getLSTMGrdSharedSizeBytes() + 2 * model.getHiddenDim() * sizeof(float)) * num));\n  HANDLE_ERROR(cudaMalloc((void**)&fnn_params_d, model.getOutputGrdSharedSizeBytes() * num));\n  HANDLE_ERROR(cudaMalloc((void**)&shared_fnn_params_d, model.getOutputGrdSharedSizeBytes() * num));\n\n  dim3 threadsPerBlock(1, 1);\n  dim3 numBlocks(num, 1);\n  unsigned shared_size = mppi::kernels::calcClassSharedMemSize(&model, threadsPerBlock);\n  parameterCheckTestKernel<NETWORK_T><<<numBlocks, threadsPerBlock, shared_size>>>(\n      model.network_d_, lstm_params_d, shared_lstm_params_d, fnn_params_d, shared_fnn_params_d);\n  CudaCheckError();\n\n  HANDLE_ERROR(cudaMemcpy(lstm_params.data(), lstm_params_d,\n                          (model.getLSTMGrdSharedSizeBytes() + 2 * model.getHiddenDim() * sizeof(float)) * num,\n                          cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(shared_lstm_params.data(), shared_lstm_params_d,\n                          (model.getLSTMGrdSharedSizeBytes() + 2 * model.getHiddenDim() * sizeof(float)) * num,\n                          cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(\n      cudaMemcpy(fnn_params.data(), fnn_params_d, model.getOutputGrdSharedSizeBytes() * num, cudaMemcpyDeviceToHost));\n 
 HANDLE_ERROR(cudaMemcpy(shared_fnn_params.data(), shared_fnn_params_d, model.getOutputGrdSharedSizeBytes() * num,\n                          cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(lstm_params_d);\n  cudaFree(shared_lstm_params_d);\n  cudaFree(fnn_params_d);\n  cudaFree(shared_fnn_params_d);\n}\n\ntemplate <typename NETWORK_T, int BLOCKSIZE_X>\n__global__ void forwardTestKernel(NETWORK_T* network, float* input, float* output, int num, int steps)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  extern __shared__ float theta_s[];\n  float* local_input = input + (tid * network->getInputDim());\n  float* local_output = output + (tid * network->getOutputDim());\n\n  network->initialize(theta_s);\n\n  if (tid < num)\n  {\n    float* curr_act = nullptr;\n    for (uint step = 0; step < steps; step++)\n    {\n      curr_act = network->forward(local_input, theta_s);\n    }\n    for (uint i = threadIdx.y; i < network->getOutputDim(); i += blockDim.y)\n    {\n      local_output[i] = curr_act[i];\n    }\n    __syncthreads();\n  }\n}\n\ntemplate <typename NETWORK_T, int INPUT_DIM, int OUTPUT_DIM, int BLOCKSIZE_X>\nvoid launchForwardTestKernel(NETWORK_T& model, std::vector<std::array<float, INPUT_DIM>>& input,\n                             std::vector<std::array<float, OUTPUT_DIM>>& output, int dim_y, int steps = 1)\n{\n  if (input.size() != output.size())\n  {\n    std::cerr << \"Num States doesn't match num controls\" << std::endl;\n    return;\n  }\n  int count = input.size();\n  float* input_d;\n  float* output_d;\n  HANDLE_ERROR(cudaMalloc((void**)&input_d, sizeof(float) * model.getInputDim() * count));\n  HANDLE_ERROR(cudaMalloc((void**)&output_d, sizeof(float) * model.getOutputDim() * count));\n\n  HANDLE_ERROR(cudaMemcpy(input_d, input.data(), sizeof(float) * model.getInputDim() * count, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(\n      cudaMemcpy(output_d, input.data(), sizeof(float) * model.getOutputDim() * count, 
cudaMemcpyHostToDevice));\n\n  const int gridsize_x = (count - 1) / BLOCKSIZE_X + 1;\n  dim3 threadsPerBlock(BLOCKSIZE_X, dim_y);\n  dim3 numBlocks(gridsize_x, 1);\n  unsigned shared_size = mppi::kernels::calcClassSharedMemSize(&model, threadsPerBlock);\n  forwardTestKernel<NETWORK_T, BLOCKSIZE_X>\n      <<<numBlocks, threadsPerBlock, shared_size>>>(model.network_d_, input_d, output_d, count, steps);\n  CudaCheckError();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(input.data(), input_d, sizeof(float) * model.getInputDim() * count, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(\n      cudaMemcpy(output.data(), output_d, sizeof(float) * model.getOutputDim() * count, cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(input_d);\n  cudaFree(output_d);\n}\n\ntemplate <typename NETWORK_T, int BLOCKSIZE_X>\n__global__ void forwardTestKernelPreload(NETWORK_T* network, float* input, float* output, int num, int steps)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  extern __shared__ float theta_s[];\n  float* local_input = input + (tid * network->getInputDim());\n  float* local_output = output + (tid * network->getOutputDim());\n\n  network->initialize(theta_s);\n\n  if (tid < num)\n  {\n    float* input_loc = network->getInputLocation(theta_s);\n    for (int i = threadIdx.y; i < network->getInputDim(); i += blockDim.y)\n    {\n      input_loc[i] = local_input[i];\n    }\n    __syncthreads();\n\n    float* curr_act = nullptr;\n    for (uint step = 0; step < steps; step++)\n    {\n      curr_act = network->forward(nullptr, theta_s);\n    }\n    for (uint i = threadIdx.y; i < network->getOutputDim(); i += blockDim.y)\n    {\n      local_output[i] = curr_act[i];\n    }\n    __syncthreads();\n  }\n}\n\ntemplate <typename NETWORK_T, int INPUT_DIM, int OUTPUT_DIM, int BLOCKSIZE_X>\nvoid launchForwardTestKernelPreload(NETWORK_T& model, std::vector<std::array<float, INPUT_DIM>>& input,\n                                    
std::vector<std::array<float, OUTPUT_DIM>>& output, int dim_y, int steps = 1)\n{\n  if (input.size() != output.size())\n  {\n    std::cerr << \"Num States doesn't match num controls\" << std::endl;\n    return;\n  }\n  int count = input.size();\n  float* input_d;\n  float* output_d;\n  HANDLE_ERROR(cudaMalloc((void**)&input_d, sizeof(float) * INPUT_DIM * count));\n  HANDLE_ERROR(cudaMalloc((void**)&output_d, sizeof(float) * OUTPUT_DIM * count));\n\n  HANDLE_ERROR(cudaMemcpy(input_d, input.data(), sizeof(float) * INPUT_DIM * count, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(output_d, input.data(), sizeof(float) * OUTPUT_DIM * count, cudaMemcpyHostToDevice));\n\n  const int gridsize_x = (count - 1) / BLOCKSIZE_X + 1;\n  dim3 threadsPerBlock(BLOCKSIZE_X, dim_y);\n  dim3 numBlocks(gridsize_x, 1);\n  unsigned shared_size = mppi::kernels::calcClassSharedMemSize(&model, threadsPerBlock);\n  forwardTestKernelPreload<NETWORK_T, BLOCKSIZE_X>\n      <<<numBlocks, threadsPerBlock, shared_size>>>(model.network_d_, input_d, output_d, count, steps);\n  CudaCheckError();\n\n  // Copy the memory back to the host\n  HANDLE_ERROR(cudaMemcpy(input.data(), input_d, sizeof(float) * INPUT_DIM * count, cudaMemcpyDeviceToHost));\n  HANDLE_ERROR(cudaMemcpy(output.data(), output_d, sizeof(float) * OUTPUT_DIM * count, cudaMemcpyDeviceToHost));\n  cudaDeviceSynchronize();\n\n  cudaFree(input_d);\n  cudaFree(output_d);\n}\n\n#endif  // MPPIGENERIC_NETWORK_HELPER_KERNEL_TEST_CUH\n"
  },
  {
    "path": "tests/include/kernel_tests/utils/texture_test_kernels.cuh",
    "content": "//\n// Created by jason on 1/9/22.\n//\n\n#ifndef MPPIGENERIC_TEXTURE_TEST_KERNELS_CUH\n#define MPPIGENERIC_TEXTURE_TEST_KERNELS_CUH\n\n#include <mppi/utils/texture_helpers/two_d_texture_helper.cuh>\n\ntemplate <class TEX_T, class DATA_T>\n__global__ void textureTestKernel(TEX_T& tex, DATA_T* test_results, float4* test_indexes, int num_points)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  // printf(\"\\nEntering the kernel!\\n\");\n  // printf(\"The thread id is: %i\\n\", tid);\n  if (tid < num_points)\n  {\n    // printf(\"thread ia: %i went to check texture at index %i, %i\\n\", tid, test_indexes[tid].x, test_indexes[tid].y);\n\n    // query texture\n    float3 query_point = make_float3(test_indexes[tid].x, test_indexes[tid].y, test_indexes[tid].z);\n    int index = round(test_indexes[tid].w);\n    test_results[tid] = tex.queryTexture(index, query_point);\n    // printf(\"query at %d %f,%f,%f = %f, %f, %f, %f\\n\", index, query_point.x, query_point.y, query_point.z,\n    //       test_results[tid].x, test_results[tid].y, test_results[tid].z, test_results[tid].w);\n  }\n}\n\ntemplate <class TEX_T, class DATA_T>\nstd::vector<DATA_T> getTextureAtPointsKernel(const TEX_T& helper, std::vector<float4>& query_points)\n{\n  std::vector<DATA_T> results;\n  int num_test_points = query_points.size();\n  results.resize(num_test_points);\n\n  float4* tex_results_d;\n  float4* tex_query_points_d;\n  HANDLE_ERROR(cudaMalloc((void**)&tex_results_d, sizeof(DATA_T) * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&tex_query_points_d, sizeof(float4) * num_test_points))\n\n  HANDLE_ERROR(\n      cudaMemcpy(tex_query_points_d, query_points.data(), sizeof(float4) * num_test_points, cudaMemcpyHostToDevice));\n\n  dim3 threadsPerBlock(num_test_points, 1);\n  dim3 numBlocks(1, 1);\n  textureTestKernel<<<numBlocks, threadsPerBlock>>>(*helper.ptr_d_, tex_results_d, tex_query_points_d, num_test_points);\n  CudaCheckError();\n  cudaDeviceSynchronize();\n\n  
HANDLE_ERROR(cudaMemcpy(results.data(), tex_results_d, sizeof(DATA_T) * num_test_points, cudaMemcpyDeviceToHost));\n\n  cudaDeviceSynchronize();\n\n  cudaFree(tex_results_d);\n  cudaFree(tex_query_points_d);\n  return results;\n}\n\ntemplate <class TEX_T, class DATA_T>\n__global__ void textureAtMapPoseTestKernel(TEX_T& tex, DATA_T* test_results, float4* test_indexes, int num_points)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num_points)\n  {\n    // query texture\n    float3 query_point = make_float3(test_indexes[tid].x, test_indexes[tid].y, test_indexes[tid].z);\n    int index = round(test_indexes[tid].w);\n    test_results[tid] = tex.queryTextureAtMapPose(index, query_point);\n    // printf(\"query at %d %f,%f,%f = %f, %f, %f, %f\\n\", index, query_point.x, query_point.y, query_point.z,\n    //       test_results[tid].x, test_results[tid].y, test_results[tid].z, test_results[tid].w);\n  }\n}\n\ntemplate <class TEX_T, class DATA_T>\nstd::vector<DATA_T> getTextureAtMapPointsKernel(const TEX_T& helper, std::vector<float4>& query_points)\n{\n  std::vector<DATA_T> results;\n  int num_test_points = query_points.size();\n  results.resize(num_test_points);\n\n  DATA_T* tex_results_d;\n  float4* tex_query_points_d;\n  HANDLE_ERROR(cudaMalloc((void**)&tex_results_d, sizeof(DATA_T) * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&tex_query_points_d, sizeof(float4) * num_test_points))\n\n  HANDLE_ERROR(\n      cudaMemcpy(tex_query_points_d, query_points.data(), sizeof(float4) * num_test_points, cudaMemcpyHostToDevice));\n\n  dim3 threadsPerBlock(num_test_points, 1);\n  dim3 numBlocks(1, 1);\n  textureAtMapPoseTestKernel<<<numBlocks, threadsPerBlock>>>(*helper.ptr_d_, tex_results_d, tex_query_points_d,\n                                                             num_test_points);\n  CudaCheckError();\n  cudaDeviceSynchronize();\n\n  HANDLE_ERROR(cudaMemcpy(results.data(), tex_results_d, sizeof(DATA_T) * num_test_points, 
cudaMemcpyDeviceToHost));\n\n  cudaDeviceSynchronize();\n\n  cudaFree(tex_results_d);\n  cudaFree(tex_query_points_d);\n  return results;\n}\n\ntemplate <class TEX_T, class DATA_T>\n__global__ void textureAtWorldPoseTestKernel(TEX_T& tex, DATA_T* test_results, float4* test_indexes, int num_points)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num_points)\n  {\n    // query texture\n    float3 query_point = make_float3(test_indexes[tid].x, test_indexes[tid].y, test_indexes[tid].z);\n    int index = round(test_indexes[tid].w);\n    test_results[tid] = tex.queryTextureAtWorldPose(index, query_point);\n    // printf(\"query at %d %f,%f,%f = %f, %f, %f, %f\\n\", index, query_point.x, query_point.y, query_point.z,\n    //       test_results[tid].x, test_results[tid].y, test_results[tid].z, test_results[tid].w);\n  }\n}\n\ntemplate <class TEX_T, class DATA_T>\nstd::vector<DATA_T> getTextureAtWorldPointsKernel(const TEX_T& helper, std::vector<float4>& query_points)\n{\n  std::vector<DATA_T> results;\n  int num_test_points = query_points.size();\n  results.resize(num_test_points);\n\n  DATA_T* tex_results_d;\n  float4* tex_query_points_d;\n  HANDLE_ERROR(cudaMalloc((void**)&tex_results_d, sizeof(DATA_T) * num_test_points))\n  HANDLE_ERROR(cudaMalloc((void**)&tex_query_points_d, sizeof(float4) * num_test_points))\n\n  HANDLE_ERROR(\n      cudaMemcpy(tex_query_points_d, query_points.data(), sizeof(float4) * num_test_points, cudaMemcpyHostToDevice));\n\n  dim3 threadsPerBlock(num_test_points, 1);\n  dim3 numBlocks(1, 1);\n  textureAtWorldPoseTestKernel<<<numBlocks, threadsPerBlock>>>(*helper.ptr_d_, tex_results_d, tex_query_points_d,\n                                                               num_test_points);\n  CudaCheckError();\n  cudaDeviceSynchronize();\n\n  HANDLE_ERROR(cudaMemcpy(results.data(), tex_results_d, sizeof(DATA_T) * num_test_points, cudaMemcpyDeviceToHost));\n\n  cudaDeviceSynchronize();\n\n  cudaFree(tex_results_d);\n  
cudaFree(tex_query_points_d);\n  return results;\n}\n\n#endif  // MPPIGENERIC_TEXTURE_TEST_KERNELS_CUH\n"
  },
  {
    "path": "tests/include/mppi_test/mock_classes/mock_classes.h",
    "content": "//\n// Created by jgibson37 on 2/22/24.\n//\n\n#ifndef MPPIGENERIC_MOCK_CLASSES_H\n#define MPPIGENERIC_MOCK_CLASSES_H\n\nconst int NUM_TIMESTEPS = 100;\n\n#include <mppi_test/mock_classes/mock_costs.h>\n#include <mppi_test/mock_classes/mock_dynamics.h>\n#include <mppi_test/mock_classes/mock_sampler.h>\n#include <mppi_test/mock_classes/mock_feedback.h>\n\n#endif  // MPPIGENERIC_MOCK_CLASSES_H\n"
  },
  {
    "path": "tests/include/mppi_test/mock_classes/mock_controller.h",
    "content": "//\n// Created by jason on 4/14/20.\n//\n\n#ifndef MPPIGENERIC_MOCK_CONTROLLER_H\n#define MPPIGENERIC_MOCK_CONTROLLER_H\n\n#include <gtest/gtest.h>\n#include <gmock/gmock.h>\n#include <mppi/controllers/controller.cuh>\n#include <mppi_test/mock_classes/mock_classes.h>\n\n// ===== mock controller ====\nclass MockController\n  : public Controller<MockDynamics, MockCost, MockFeedback, MockSamplingDistribution, NUM_TIMESTEPS, 512>\n{\npublic:\n  MOCK_METHOD0(calculateSampledStateTrajectories, void());\n  MOCK_METHOD0(resetControls, void());\n  MOCK_METHOD1(computeFeedback, void(const Eigen::Ref<const state_array>& state));\n  MOCK_METHOD1(slideControlSequence, void(int stride));\n  MOCK_METHOD5(getCurrentControl, control_array(Eigen::Ref<state_array>, double, Eigen::Ref<state_array>,\n                                                Eigen::Ref<control_trajectory>, TEMPLATED_FEEDBACK_STATE&));\n  MOCK_METHOD2(computeControl, void(const Eigen::Ref<const state_array>& state, int optimization_stride));\n  MOCK_METHOD(control_trajectory, getControlSeq, (), (const, override));\n  MOCK_METHOD(state_trajectory, getTargetStateSeq, (), (const, override));\n  // MOCK_METHOD(output_trajectory, getTargetOutputSeq, (), (const, override));\n  MOCK_METHOD(TEMPLATED_FEEDBACK_STATE, getFeedbackState, (), (const, override));\n  MOCK_METHOD(control_array, getFeedbackControl,\n              (const Eigen::Ref<const state_array>&, const Eigen::Ref<const state_array>&, int), (override));\n  MOCK_METHOD1(updateImportanceSampler, void(const Eigen::Ref<const control_trajectory>& nominal_control));\n  MOCK_METHOD0(allocateCUDAMemory, void());\n  MOCK_METHOD0(computeFeedbackPropagatedStateSeq, void());\n  MOCK_METHOD0(smoothControlTrajectory, void());\n  MOCK_METHOD1(computeStateTrajectory, void(const Eigen::Ref<const state_array>& x0));\n  MOCK_METHOD1(setPercentageSampledControlTrajectories, void(float new_perc));\n  MOCK_METHOD0(getSampledNoise, std::vector<float>());\n  
MOCK_METHOD0(getDt, float());\n};\n#endif  // MPPIGENERIC_MOCK_CONTROLLER_H\n"
  },
  {
    "path": "tests/include/mppi_test/mock_classes/mock_costs.h",
    "content": "//\n// Created by jason on 4/14/20.\n//\n\n#ifndef MPPIGENERIC_MOCK_COSTS_H\n#define MPPIGENERIC_MOCK_COSTS_H\n\n#include <gtest/gtest.h>\n#include <gmock/gmock.h>\n#include <mppi/cost_functions/cost.cuh>\n\n// ===== mock cost ====\ntypedef struct\n{\n  int test = 1;\n} mockCostParams;\n\nclass MockCost : public Cost<MockCost, mockCostParams, DynamicsParams>\n{\npublic:\n  MOCK_METHOD1(bindToStream, void(cudaStream_t stream));\n  MOCK_METHOD1(setParams, void(mockCostParams params));\n  MOCK_METHOD0(getParams, mockCostParams());\n  MOCK_METHOD0(GPUSetup, void());\n  MOCK_METHOD0(freeCudaMem, void());\n};\n#endif  // MPPIGENERIC_MOCK_COSTS_H\n"
  },
  {
    "path": "tests/include/mppi_test/mock_classes/mock_dynamics.h",
    "content": "//\n// Created by jason on 4/14/20.\n//\n\n#ifndef MPPIGENERIC_MOCK_DYNAMICS_H\n#define MPPIGENERIC_MOCK_DYNAMICS_H\n\n#include <gtest/gtest.h>\n#include <gmock/gmock.h>\n#include <mppi/dynamics/dynamics.cuh>\n\n// ===== mock dynamics ====\nstruct mockDynamicsParams : public DynamicsParams\n{\n  int test = 2;\n  bool copy_everything = false;\n  float buffer[(1 + 1) * 10] = { 0.0 };\n\n  void updateBuffer(Eigen::Matrix<float, 2, 11> new_buffer)\n  {\n  }\n};\n\nclass MockDynamics : public MPPI_internal::Dynamics<MockDynamics, mockDynamicsParams>\n{\npublic:\n  typedef Eigen::Matrix<float, CONTROL_DIM, 1> control_array;                 // Control at a time t\n  typedef Eigen::Matrix<float, STATE_DIM, 1> state_array;                     // State at a time t\n  typedef Eigen::Matrix<float, OUTPUT_DIM, 1> output_array;                   // Output at a time t\n  typedef Eigen::Matrix<float, STATE_DIM, STATE_DIM> dfdx;                    // Jacobian wrt x\n  typedef Eigen::Matrix<float, STATE_DIM, CONTROL_DIM> dfdu;                  // Jacobian wrt u\n  typedef Eigen::Matrix<float, CONTROL_DIM, STATE_DIM> feedback_matrix;       // Feedback matrix\n  typedef Eigen::Matrix<float, STATE_DIM, STATE_DIM + CONTROL_DIM> Jacobian;  // Jacobian of x and u\n\n  typedef std::map<std::string, Eigen::VectorXf> buffer_trajectory;\n\n  MOCK_METHOD1(bindToStream, void(cudaStream_t stream));\n  MOCK_METHOD1(setParams, void(mockDynamicsParams params));\n  MOCK_METHOD0(GPUSetup, void());\n  MOCK_METHOD0(getControlRanges, std::array<float2, 1>());\n  MOCK_METHOD0(getControlRangesRaw, void());\n  MOCK_METHOD0(getParams, mockDynamicsParams());\n  // MOCK_METHOD0(freeCudaMem, void());\n  MOCK_METHOD0(paramsToDevice, void());\n  MOCK_METHOD3(computeDynamics, void(const Eigen::Ref<const state_array>&, const Eigen::Ref<const control_array>&,\n                                     Eigen::Ref<state_array>));\n  MOCK_METHOD2(computeKinematics, void(Eigen::Ref<const state_array>&, 
Eigen::Ref<state_array>));\n  MOCK_METHOD3(computeStateDeriv, void(const Eigen::Ref<const state_array>&, const Eigen::Ref<const control_array>&,\n                                       Eigen::Ref<state_array>));\n  MOCK_METHOD4(updateState,\n               void(const Eigen::Ref<const state_array>, Eigen::Ref<state_array>, Eigen::Ref<state_array>, float));\n\n  MOCK_METHOD7(step, void(Eigen::Ref<state_array>, Eigen::Ref<state_array>, Eigen::Ref<state_array>,\n                          const Eigen::Ref<const control_array>&, Eigen::Ref<output_array>, float, float));\n  MOCK_METHOD3(updateState, void(const Eigen::Ref<const state_array>, Eigen::Ref<state_array>, float));\n  MOCK_METHOD2(enforceConstraints, void(Eigen::Ref<state_array>, Eigen::Ref<control_array>));\n  MOCK_METHOD4(computeGrad, bool(const Eigen::Ref<const state_array>&, const Eigen::Ref<const control_array>,\n                                 Eigen::Ref<dfdx>, Eigen::Ref<dfdu>));\n  MOCK_METHOD1(updateFromBuffer, void(const buffer_trajectory& buffer));\n  MOCK_METHOD1(stateFromMap, state_array(const std::map<std::string, float>&));\n  MOCK_METHOD0(freeCudaMem, void());\n};\n\n#endif  // MPPIGENERIC_MOCK_DYNAMICS_H\n"
  },
  {
    "path": "tests/include/mppi_test/mock_classes/mock_feedback.h",
    "content": "//\n// Created by jason on 2/21/24.\n//\n\n#ifndef MPPIGENERIC_TESTS_INCLUDE_MPPI_TEST_MOCK_CLASSES_MOCK_FEEDBACK_H_\n#define MPPIGENERIC_TESTS_INCLUDE_MPPI_TEST_MOCK_CLASSES_MOCK_FEEDBACK_H_\n\n#include <mppi_test/mock_classes/mock_classes.h>\n#include <mppi/feedback_controllers/feedback.cuh>\n\nstruct mockGPUFeedbackParams\n{\n};\n\nclass MockGPUFeedback : public GPUFeedbackController<MockGPUFeedback, MockDynamics, GPUState>\n{\npublic:\n  using DYN_T = MockDynamics;\n  using FEEDBACK_STATE_T = GPUState;\n\n  MockGPUFeedback(cudaStream_t stream) : GPUFeedbackController<MockGPUFeedback, MockDynamics, GPUState>(stream)\n  {\n  }\n};\n\nclass MockFeedback : public FeedbackController<MockGPUFeedback, mockGPUFeedbackParams, NUM_TIMESTEPS>\n{\npublic:\n  MOCK_METHOD0(initTrackingController, void());\n  MOCK_METHOD4(k_, control_array(const Eigen::Ref<const state_array>& x_act,\n                                 const Eigen::Ref<const state_array>& x_goal, int t, GPUState& fb_state));\n  MOCK_METHOD3(computeFeedback, void(const Eigen::Ref<const state_array>& init_state,\n                                     const Eigen::Ref<const state_trajectory>& goal_traj,\n                                     const Eigen::Ref<const control_trajectory>& control_traj));\n  MOCK_METHOD0(freeCudaMem, void());\n};\n\n#endif  // MPPIGENERIC_TESTS_INCLUDE_MPPI_TEST_MOCK_CLASSES_MOCK_FEEDBACK_H_\n"
  },
  {
    "path": "tests/include/mppi_test/mock_classes/mock_sampler.h",
    "content": "//\n// Created by jason on 2/21/24.\n//\n\n#ifndef MPPIGENERIC_TESTS_INCLUDE_MPPI_TEST_MOCK_CLASSES_MOCK_SAMPLER_H_\n#define MPPIGENERIC_TESTS_INCLUDE_MPPI_TEST_MOCK_CLASSES_MOCK_SAMPLER_H_\n\n#include <mppi/sampling_distributions/sampling_distribution.cuh>\n#include <mppi_test/mock_classes/mock_dynamics.h>\n\nclass MockSamplingDistribution\n  : public mppi::sampling_distributions::SamplingDistribution<\n        MockSamplingDistribution, mppi::sampling_distributions::SamplingParams, mockDynamicsParams>\n{\npublic:\n  MOCK_METHOD1(bindToStream, void(cudaStream_t stream));\n  MOCK_METHOD0(GPUSetup, void());\n  MOCK_METHOD1(resizeVisualizationCotnrolTrajectories, void(bool synchronize));\n  MOCK_METHOD1(allocateCUDAMemory, void(bool synchronize));\n  MOCK_METHOD0(allocateCUDAMemoryHelper, void());\n  MOCK_METHOD0(freeCudaMem, void());\n  MOCK_METHOD4(generateSamples,\n               void(const int& opt_stride, const int& iteration_num, curandGenerator_t& gen, bool synchronize));\n  MOCK_METHOD3(setHostOptimalControlSequence,\n               void(float* optimal_control_trajectory, const int& distribution_idx, bool synchronize));\n  MOCK_METHOD4(updateDistributionParamsFromDevice,\n               void(const float* trajectory_weights_d, float normalizer, const int& distribution_i, bool synchronize));\n};\n#endif  // MPPIGENERIC_TESTS_INCLUDE_MPPI_TEST_MOCK_CLASSES_MOCK_SAMPLER_H_\n"
  },
  {
    "path": "tests/integration/CMakeLists.txt",
    "content": "set(CMAKE_CXX_STANDARD 17)\n\nset(GTEST_LIBRARIES gtest gmock gtest_main)\nfile(GLOB TARGET_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cu)\n\nforeach(T_FILE IN LISTS TARGET_SRCS)\n    get_filename_component(T_NAME ${T_FILE} NAME_WE)\n    add_executable(${T_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp ${T_FILE})\n    target_link_libraries(${T_NAME}\n            ${GTEST_LIBRARIES}\n            ${MPPI_HEADER_LIBRARY_NAME})\n    gtest_add_tests(TARGET ${T_NAME})\n    set_target_properties(${T_NAME} PROPERTIES FOLDER test)\nendforeach()\n\n\n"
  },
  {
    "path": "tests/integration/integrator_tests.cu",
    "content": "//\n// Created by mgandhi3 on 7/22/21.\n//\n\n#include <gtest/gtest.h>\n#include <mppi/utils/numerical_integration.h>\n#include <mppi/dynamics/dynamics.cuh>\n\nstruct TestDynamicsParams : public DynamicsParams\n{\n};\n\nusing namespace MPPI_internal;\n\nclass TestDynamics : public Dynamics<TestDynamics, TestDynamicsParams>\n{\npublic:\n  explicit TestDynamics(cudaStream_t stream = nullptr) : Dynamics<TestDynamics, TestDynamicsParams>(stream){};\n\n  void computeDynamics(const Eigen::Ref<const state_array>& state, const Eigen::Ref<const control_array>& control,\n                       Eigen::Ref<state_array> state_der)\n  {\n    state_der = -2 * state;\n  }\n\n  state_array stateFromMap(const std::map<std::string, float>& map)\n  {\n  }\n};\n\nTEST(Integration, RK4)\n{\n  using DYN = TestDynamics;\n  DYN dynamics;\n  DYN::state_array x_k, x_kp1;\n  x_k << 3;\n  DYN::control_array u_k;\n  float dt = 0.05;\n  int num_timesteps = 100;\n  for (int i = 0; i < num_timesteps; ++i)\n  {\n    rk4integrate(&dynamics, dt, x_k, u_k, x_kp1);\n    x_k = x_kp1;\n  }\n\n  ASSERT_NEAR(0.0, x_k[0], 1e-6);\n}\n"
  },
  {
    "path": "tests/math_utils/CMakeLists.txt",
    "content": "set(GTEST_LIBRARIES gtest gmock gtest_main)\nfile(GLOB TARGET_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cu)\n\nforeach(T_FILE IN LISTS TARGET_SRCS)\n    get_filename_component(T_NAME ${T_FILE} NAME_WE)\n    add_executable(${T_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp ${T_FILE})\n    target_link_libraries(${T_NAME}\n            ${GTEST_LIBRARIES}\n            ${MPPI_HEADER_LIBRARY_NAME})\n    gtest_add_tests(TARGET ${T_NAME})\n    set_target_properties(${T_NAME} PROPERTIES FOLDER test)\nendforeach()\n"
  },
  {
    "path": "tests/math_utils/cuda_math_utils_tests.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/utils/cuda_math_utils.cuh>\n#include <mppi/utils/gpu_err_chk.cuh>\n\n#include <random>\n\ntemplate <class T = float2>\n__global__ void VectorVectorAddTestKernel(const T* __restrict__ input1, const T* __restrict__ input2,\n                                          T* __restrict__ output2)\n{\n  *output2 = *input1 + *input2;\n}\n\ntemplate <class T = float2>\n__global__ void VectorVectorSubTestKernel(const T* __restrict__ input1, const T* __restrict__ input2,\n                                          T* __restrict__ output2)\n{\n  *output2 = *input1 - *input2;\n}\n\ntemplate <class T = float2>\n__global__ void VectorVectorMultTestKernel(const T* __restrict__ input1, const T* __restrict__ input2,\n                                           T* __restrict__ output2)\n{\n  *output2 = (*input1) * (*input2);\n}\n\ntemplate <class T = float2>\n__global__ void VectorVectorDivTestKernel(const T* __restrict__ input1, const T* __restrict__ input2,\n                                          T* __restrict__ output2)\n{\n  *output2 = (*input1) / (*input2);\n}\n\ntemplate <class T = float2, class S = float>\n__global__ void VectorScalarAddTestKernel(const T* __restrict__ input1, const S* __restrict__ input2,\n                                          T* __restrict__ output2)\n{\n  *output2 = *input1 + *input2;\n}\n\ntemplate <class T = float2, class S = float>\n__global__ void VectorScalarMultTestKernel(const T* __restrict__ input1, const S* __restrict__ input2,\n                                           T* __restrict__ output2)\n{\n  *output2 = (*input1) * (*input2);\n}\n\ntemplate <class T = float2, class S = float>\n__global__ void VectorScalarSubTestKernel(const T* __restrict__ input1, const S* __restrict__ input2,\n                                          T* __restrict__ output2)\n{\n  *output2 = *input1 - *input2;\n}\n\ntemplate <class T = float2, class S = float>\n__global__ void VectorScalarDivTestKernel(const T* 
__restrict__ input1, const S* __restrict__ input2,\n                                          T* __restrict__ output2)\n{\n  *output2 = (*input1) / (*input2);\n}\n\ntemplate <class T = float2, class S = float>\n__global__ void VectorVectorScalarAddMultTestKernel(const T* __restrict__ input1, const T* __restrict__ input2,\n                                                    const S* __restrict__ scalar, T* __restrict__ output2)\n{\n  *output2 = *input1 + (*input2) * (*scalar);\n}\n\ntemplate <class T = float2>\nclass CudaFloatStructsTests : public ::testing::Test\n{\npublic:\n  T input1_cpu;\n  T input2_cpu;\n  T output_cpu;\n  T output_gpu;\n  T* input1_d;\n  T* input2_d;\n  T* output_d;\n\n  float scalar;\n  float* scalar_d;\n\n  void SetUp() override\n  {\n    HANDLE_ERROR(cudaMalloc((void**)&scalar_d, sizeof(float)));\n    HANDLE_ERROR(cudaMalloc((void**)&input1_d, sizeof(T)));\n    HANDLE_ERROR(cudaMalloc((void**)&input2_d, sizeof(T)));\n    HANDLE_ERROR(cudaMalloc((void**)&output_d, sizeof(T)));\n\n    // Setup random number generator\n    std::random_device rd;\n    std::mt19937 gen(rd());\n    std::uniform_real_distribution<float> dist(0.0f, 10.f);\n\n    initializeStruct(input1_cpu, gen, dist);\n    initializeStruct(input2_cpu, gen, dist);\n    HANDLE_ERROR(cudaMemcpy(input1_d, &input1_cpu, sizeof(T), cudaMemcpyHostToDevice));\n    HANDLE_ERROR(cudaMemcpy(input2_d, &input2_cpu, sizeof(T), cudaMemcpyHostToDevice));\n    HANDLE_ERROR(cudaMemcpy(scalar_d, &scalar, sizeof(float), cudaMemcpyHostToDevice));\n  }\n\n  void initializeStruct(T& val, std::mt19937& gen, std::uniform_real_distribution<float>& dist)\n  {\n    std::cout << \"Did not specialize!\" << std::endl;\n  }\n\n  T addVec(const T& val1, const T& val2)\n  {\n    T a;\n    std::cout << \"Wrong addVec!\" << std::endl;\n    return a;\n  }\n\n  T addScalar(const T& val1, const float& val2)\n  {\n    T a;\n    std::cout << \"Wrong addScalar!\" << std::endl;\n    return a;\n  }\n\n  T subVec(const T& 
val1, const T& val2)\n  {\n    T a;\n    std::cout << \"Wrong subVec!\" << std::endl;\n    return a;\n  }\n\n  T subScalar(const T& val1, const float& val2)\n  {\n    T a;\n    std::cout << \"Wrong subScalar!\" << std::endl;\n    return a;\n  }\n\n  T multVec(const T& val1, const T& val2)\n  {\n    T a;\n    std::cout << \"Wrong multVec!\" << std::endl;\n    return a;\n  }\n\n  T multScalar(const T& val1, const float& val2)\n  {\n    T a;\n    std::cout << \"Wrong multScalar!\" << std::endl;\n    return a;\n  }\n\n  T divVec(const T& val1, const T& val2)\n  {\n    T a;\n    std::cout << \"Wrong divVec!\" << std::endl;\n    return a;\n  }\n\n  T divScalar(const T& val1, const float& val2)\n  {\n    T a;\n    std::cout << \"Wrong divScalar!\" << std::endl;\n    return a;\n  }\n\n  bool assert_same(const T& val1, const T& val2, const std::string& str = \"\");\n\n  void TearDown() override\n  {\n    HANDLE_ERROR(cudaFree(scalar_d));\n    HANDLE_ERROR(cudaFree(input1_d));\n    HANDLE_ERROR(cudaFree(input2_d));\n    HANDLE_ERROR(cudaFree(output_d));\n  }\n};\n\ntemplate <>\nvoid CudaFloatStructsTests<float2>::initializeStruct(float2& val, std::mt19937& gen,\n                                                     std::uniform_real_distribution<float>& dist)\n{\n  val = make_float2(dist(gen), dist(gen));\n}\n\ntemplate <>\nbool CudaFloatStructsTests<float2>::assert_same(const float2& val1, const float2& val2, const std::string& str)\n{\n  bool result = true;\n  result = result && (val1.x == val2.x);\n  result = result && (val1.y == val2.y);\n  if (!result)\n  {\n    printf(\"(%f, %f) != (%f, %f)\", val1.x, val1.y, val2.x, val2.y);\n    std::cout << str << std::endl;\n  }\n  return result;\n}\n\ntemplate <>\nfloat2 CudaFloatStructsTests<float2>::addVec(const float2& val1, const float2& val2)\n{\n  float2 output;\n  output.x = val1.x + val2.x;\n  output.y = val1.y + val2.y;\n  return output;\n}\n\ntemplate <>\nfloat2 CudaFloatStructsTests<float2>::multVec(const float2& val1, 
const float2& val2)\n{\n  float2 output;\n  output.x = val1.x * val2.x;\n  output.y = val1.y * val2.y;\n  return output;\n}\n\ntemplate <>\nfloat2 CudaFloatStructsTests<float2>::addScalar(const float2& val1, const float& val2)\n{\n  float2 output;\n  output.x = val1.x + val2;\n  output.y = val1.y + val2;\n  return output;\n}\n\ntemplate <>\nfloat2 CudaFloatStructsTests<float2>::multScalar(const float2& val1, const float& val2)\n{\n  float2 output;\n  output.x = val1.x * val2;\n  output.y = val1.y * val2;\n  return output;\n}\n\ntemplate <>\nfloat2 CudaFloatStructsTests<float2>::subVec(const float2& val1, const float2& val2)\n{\n  float2 output;\n  output.x = val1.x - val2.x;\n  output.y = val1.y - val2.y;\n  return output;\n}\n\ntemplate <>\nfloat2 CudaFloatStructsTests<float2>::divVec(const float2& val1, const float2& val2)\n{\n  float2 output;\n  output.x = val1.x / val2.x;\n  output.y = val1.y / val2.y;\n  return output;\n}\n\ntemplate <>\nfloat2 CudaFloatStructsTests<float2>::subScalar(const float2& val1, const float& val2)\n{\n  float2 output;\n  output.x = val1.x - val2;\n  output.y = val1.y - val2;\n  return output;\n}\n\ntemplate <>\nfloat2 CudaFloatStructsTests<float2>::divScalar(const float2& val1, const float& val2)\n{\n  float2 output;\n  output.x = val1.x / val2;\n  output.y = val1.y / val2;\n  return output;\n}\n\ntemplate <>\nvoid CudaFloatStructsTests<float4>::initializeStruct(float4& val, std::mt19937& gen,\n                                                     std::uniform_real_distribution<float>& dist)\n{\n  val = make_float4(dist(gen), dist(gen), dist(gen), dist(gen));\n}\n\ntemplate <>\nbool CudaFloatStructsTests<float4>::assert_same(const float4& val1, const float4& val2, const std::string& str)\n{\n  bool result = true;\n  result = result && (val1.x == val2.x);\n  result = result && (val1.y == val2.y);\n  result = result && (val1.z == val2.z);\n  result = result && (val1.w == val2.w);\n  if (!result)\n  {\n    printf(\"(%f, %f, %f, %f) != 
(%f, %f, %f, %f)\", val1.x, val1.y, val1.z, val1.w, val2.x, val2.y, val2.z, val2.w);\n    std::cout << str << std::endl;\n  }\n  return result;\n}\n\ntemplate <>\nfloat4 CudaFloatStructsTests<float4>::addVec(const float4& val1, const float4& val2)\n{\n  float4 output;\n  output.x = val1.x + val2.x;\n  output.y = val1.y + val2.y;\n  output.z = val1.z + val2.z;\n  output.w = val1.w + val2.w;\n  return output;\n}\n\ntemplate <>\nfloat4 CudaFloatStructsTests<float4>::multVec(const float4& val1, const float4& val2)\n{\n  float4 output;\n  output.x = val1.x * val2.x;\n  output.y = val1.y * val2.y;\n  output.z = val1.z * val2.z;\n  output.w = val1.w * val2.w;\n  return output;\n}\n\ntemplate <>\nfloat4 CudaFloatStructsTests<float4>::addScalar(const float4& val1, const float& val2)\n{\n  float4 output;\n  output.x = val1.x + val2;\n  output.y = val1.y + val2;\n  output.z = val1.z + val2;\n  output.w = val1.w + val2;\n  return output;\n}\n\ntemplate <>\nfloat4 CudaFloatStructsTests<float4>::multScalar(const float4& val1, const float& val2)\n{\n  float4 output;\n  output.x = val1.x * val2;\n  output.y = val1.y * val2;\n  output.z = val1.z * val2;\n  output.w = val1.w * val2;\n  return output;\n}\n\ntemplate <>\nfloat4 CudaFloatStructsTests<float4>::subVec(const float4& val1, const float4& val2)\n{\n  float4 output;\n  output.x = val1.x - val2.x;\n  output.y = val1.y - val2.y;\n  output.z = val1.z - val2.z;\n  output.w = val1.w - val2.w;\n  return output;\n}\n\ntemplate <>\nfloat4 CudaFloatStructsTests<float4>::divVec(const float4& val1, const float4& val2)\n{\n  float4 output;\n  output.x = val1.x / val2.x;\n  output.y = val1.y / val2.y;\n  output.z = val1.z / val2.z;\n  output.w = val1.w / val2.w;\n  return output;\n}\n\ntemplate <>\nfloat4 CudaFloatStructsTests<float4>::subScalar(const float4& val1, const float& val2)\n{\n  float4 output;\n  output.x = val1.x - val2;\n  output.y = val1.y - val2;\n  output.z = val1.z - val2;\n  output.w = val1.w - val2;\n  return 
output;\n}\n\ntemplate <>\nfloat4 CudaFloatStructsTests<float4>::divScalar(const float4& val1, const float& val2)\n{\n  float4 output;\n  output.x = val1.x / val2;\n  output.y = val1.y / val2;\n  output.z = val1.z / val2;\n  output.w = val1.w / val2;\n  return output;\n}\n\ntemplate <>\nvoid CudaFloatStructsTests<float3>::initializeStruct(float3& val, std::mt19937& gen,\n                                                     std::uniform_real_distribution<float>& dist)\n{\n  val = make_float3(dist(gen), dist(gen), dist(gen));\n}\n\ntemplate <>\nbool CudaFloatStructsTests<float3>::assert_same(const float3& val1, const float3& val2, const std::string& str)\n{\n  bool result = true;\n  result = result && (val1.x == val2.x);\n  result = result && (val1.y == val2.y);\n  result = result && (val1.z == val2.z);\n  if (!result)\n  {\n    printf(\"(%f, %f, %f) != (%f, %f, %f)\", val1.x, val1.y, val1.z, val2.x, val2.y, val2.z);\n    std::cout << str << std::endl;\n  }\n  return result;\n}\n\ntemplate <>\nfloat3 CudaFloatStructsTests<float3>::addVec(const float3& val1, const float3& val2)\n{\n  float3 output;\n  output.x = val1.x + val2.x;\n  output.y = val1.y + val2.y;\n  output.z = val1.z + val2.z;\n  return output;\n}\n\ntemplate <>\nfloat3 CudaFloatStructsTests<float3>::multVec(const float3& val1, const float3& val2)\n{\n  float3 output;\n  output.x = val1.x * val2.x;\n  output.y = val1.y * val2.y;\n  output.z = val1.z * val2.z;\n  return output;\n}\n\ntemplate <>\nfloat3 CudaFloatStructsTests<float3>::addScalar(const float3& val1, const float& val2)\n{\n  float3 output;\n  output.x = val1.x + val2;\n  output.y = val1.y + val2;\n  output.z = val1.z + val2;\n  return output;\n}\n\ntemplate <>\nfloat3 CudaFloatStructsTests<float3>::multScalar(const float3& val1, const float& val2)\n{\n  float3 output;\n  output.x = val1.x * val2;\n  output.y = val1.y * val2;\n  output.z = val1.z * val2;\n  return output;\n}\n\ntemplate <>\nfloat3 CudaFloatStructsTests<float3>::subVec(const 
float3& val1, const float3& val2)\n{\n  float3 output;\n  output.x = val1.x - val2.x;\n  output.y = val1.y - val2.y;\n  output.z = val1.z - val2.z;\n  return output;\n}\n\ntemplate <>\nfloat3 CudaFloatStructsTests<float3>::divVec(const float3& val1, const float3& val2)\n{\n  float3 output;\n  output.x = val1.x / val2.x;\n  output.y = val1.y / val2.y;\n  output.z = val1.z / val2.z;\n  return output;\n}\n\ntemplate <>\nfloat3 CudaFloatStructsTests<float3>::subScalar(const float3& val1, const float& val2)\n{\n  float3 output;\n  output.x = val1.x - val2;\n  output.y = val1.y - val2;\n  output.z = val1.z - val2;\n  return output;\n}\n\ntemplate <>\nfloat3 CudaFloatStructsTests<float3>::divScalar(const float3& val1, const float& val2)\n{\n  float3 output;\n  output.x = val1.x / val2;\n  output.y = val1.y / val2;\n  output.z = val1.z / val2;\n  return output;\n}\n\nusing TYPE_TESTS = ::testing::Types<float2, float3, float4>;\n\nTYPED_TEST_SUITE(CudaFloatStructsTests, TYPE_TESTS);\n\nTYPED_TEST(CudaFloatStructsTests, VecAddScalar)\n{\n  using T = TypeParam;\n  T ground_truth_output = this->addScalar(this->input1_cpu, this->scalar);\n\n  this->output_cpu = this->input1_cpu + this->scalar;\n  std::string cpu_fail = \"Doesn't pass cpu test\";\n  ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_cpu)) << cpu_fail;\n\n  // Parallelization tests\n  T zero = this->output_gpu;\n  for (int blocks = 1; blocks < 10; blocks++)\n  {\n    for (int threads = 1; threads < 128; threads++)\n    {\n      HANDLE_ERROR(cudaMemcpy(this->output_d, &zero, sizeof(T), cudaMemcpyHostToDevice));\n      VectorScalarAddTestKernel<T, float><<<blocks, threads>>>(this->input1_d, this->scalar_d, this->output_d);\n      HANDLE_ERROR(cudaMemcpy(&this->output_gpu, this->output_d, sizeof(T), cudaMemcpyDeviceToHost));\n      std::string fail_gpu =\n          \" failed with \" + std::to_string(blocks) + \" blocks of \" + std::to_string(threads) + \" threads\";\n      
ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_gpu, fail_gpu)) << fail_gpu;\n    }\n  }\n}\n\nTYPED_TEST(CudaFloatStructsTests, VecSubScalar)\n{\n  using T = TypeParam;\n  T ground_truth_output = this->subScalar(this->input1_cpu, this->scalar);\n\n  this->output_cpu = this->input1_cpu - this->scalar;\n  std::string cpu_fail = \"Doesn't pass cpu test\";\n  ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_cpu)) << cpu_fail;\n\n  // Parallelization tests\n  T zero = this->output_gpu;\n  for (int blocks = 1; blocks < 10; blocks++)\n  {\n    for (int threads = 1; threads < 128; threads++)\n    {\n      HANDLE_ERROR(cudaMemcpy(this->output_d, &zero, sizeof(T), cudaMemcpyHostToDevice));\n      VectorScalarSubTestKernel<T, float><<<blocks, threads>>>(this->input1_d, this->scalar_d, this->output_d);\n      HANDLE_ERROR(cudaMemcpy(&this->output_gpu, this->output_d, sizeof(T), cudaMemcpyDeviceToHost));\n      std::string fail_gpu =\n          \" failed with \" + std::to_string(blocks) + \" blocks of \" + std::to_string(threads) + \" threads\";\n      ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_gpu, fail_gpu)) << fail_gpu;\n    }\n  }\n}\n\nTYPED_TEST(CudaFloatStructsTests, VecMultScalar)\n{\n  using T = TypeParam;\n  T ground_truth_output = this->multScalar(this->input1_cpu, this->scalar);\n\n  this->output_cpu = this->input1_cpu * this->scalar;\n  std::string cpu_fail = \"Doesn't pass cpu test\";\n  ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_cpu)) << cpu_fail;\n\n  // Parallelization tests\n  T zero = this->output_gpu;\n  for (int blocks = 1; blocks < 10; blocks++)\n  {\n    for (int threads = 1; threads < 128; threads++)\n    {\n      HANDLE_ERROR(cudaMemcpy(this->output_d, &zero, sizeof(T), cudaMemcpyHostToDevice));\n      VectorScalarMultTestKernel<T, float><<<blocks, threads>>>(this->input1_d, this->scalar_d, this->output_d);\n      HANDLE_ERROR(cudaMemcpy(&this->output_gpu, this->output_d, 
sizeof(T), cudaMemcpyDeviceToHost));\n      std::string fail_gpu =\n          \" failed with \" + std::to_string(blocks) + \" blocks of \" + std::to_string(threads) + \" threads\";\n      ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_gpu, fail_gpu)) << fail_gpu;\n    }\n  }\n}\n\nTYPED_TEST(CudaFloatStructsTests, VecDivScalar)\n{\n  using T = TypeParam;\n  T ground_truth_output = this->divScalar(this->input1_cpu, this->scalar);\n\n  this->output_cpu = this->input1_cpu / this->scalar;\n  std::string cpu_fail = \"Doesn't pass cpu test\";\n  ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_cpu)) << cpu_fail;\n\n  // Parallelization tests\n  T zero = this->output_gpu;\n  for (int blocks = 1; blocks < 10; blocks++)\n  {\n    for (int threads = 1; threads < 128; threads++)\n    {\n      HANDLE_ERROR(cudaMemcpy(this->output_d, &zero, sizeof(T), cudaMemcpyHostToDevice));\n      VectorScalarDivTestKernel<T, float><<<blocks, threads>>>(this->input1_d, this->scalar_d, this->output_d);\n      HANDLE_ERROR(cudaMemcpy(&this->output_gpu, this->output_d, sizeof(T), cudaMemcpyDeviceToHost));\n      std::string fail_gpu =\n          \" failed with \" + std::to_string(blocks) + \" blocks of \" + std::to_string(threads) + \" threads\";\n      ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_gpu, fail_gpu)) << fail_gpu;\n    }\n  }\n}\n\nTYPED_TEST(CudaFloatStructsTests, VecAddVec)\n{\n  using T = TypeParam;\n  T ground_truth_output = this->addVec(this->input1_cpu, this->input2_cpu);\n\n  this->output_cpu = this->input1_cpu + this->input2_cpu;\n  std::string cpu_fail = \"Doesn't pass cpu test\";\n  ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_cpu)) << cpu_fail;\n\n  // Parallelization tests\n  T zero = this->output_gpu;\n  for (int blocks = 1; blocks < 10; blocks++)\n  {\n    for (int threads = 1; threads < 128; threads++)\n    {\n      HANDLE_ERROR(cudaMemcpy(this->output_d, &zero, sizeof(T), 
cudaMemcpyHostToDevice));\n      VectorVectorAddTestKernel<T><<<blocks, threads>>>(this->input1_d, this->input2_d, this->output_d);\n      HANDLE_ERROR(cudaMemcpy(&this->output_gpu, this->output_d, sizeof(T), cudaMemcpyDeviceToHost));\n      std::string fail_gpu =\n          \" failed with \" + std::to_string(blocks) + \" blocks of \" + std::to_string(threads) + \" threads\";\n      ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_gpu, fail_gpu)) << fail_gpu;\n    }\n  }\n}\n\nTYPED_TEST(CudaFloatStructsTests, VecSubVec)\n{\n  using T = TypeParam;\n  T ground_truth_output = this->subVec(this->input1_cpu, this->input2_cpu);\n\n  this->output_cpu = this->input1_cpu - this->input2_cpu;\n  std::string cpu_fail = \"Doesn't pass cpu test\";\n  ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_cpu)) << cpu_fail;\n\n  // Parallelization tests\n  T zero = this->output_gpu;\n  for (int blocks = 1; blocks < 10; blocks++)\n  {\n    for (int threads = 1; threads < 128; threads++)\n    {\n      HANDLE_ERROR(cudaMemcpy(this->output_d, &zero, sizeof(T), cudaMemcpyHostToDevice));\n      VectorVectorSubTestKernel<T><<<blocks, threads>>>(this->input1_d, this->input2_d, this->output_d);\n      HANDLE_ERROR(cudaMemcpy(&this->output_gpu, this->output_d, sizeof(T), cudaMemcpyDeviceToHost));\n      std::string fail_gpu =\n          \" failed with \" + std::to_string(blocks) + \" blocks of \" + std::to_string(threads) + \" threads\";\n      ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_gpu, fail_gpu)) << fail_gpu;\n    }\n  }\n}\n\nTYPED_TEST(CudaFloatStructsTests, VecDivVec)\n{\n  using T = TypeParam;\n  T ground_truth_output = this->divVec(this->input1_cpu, this->input2_cpu);\n\n  this->output_cpu = this->input1_cpu / this->input2_cpu;\n  std::string cpu_fail = \"Doesn't pass cpu test\";\n  ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_cpu)) << cpu_fail;\n\n  // Parallelization tests\n  T zero = this->output_gpu;\n  for 
(int blocks = 1; blocks < 10; blocks++)\n  {\n    for (int threads = 1; threads < 128; threads++)\n    {\n      HANDLE_ERROR(cudaMemcpy(this->output_d, &zero, sizeof(T), cudaMemcpyHostToDevice));\n      VectorVectorDivTestKernel<T><<<blocks, threads>>>(this->input1_d, this->input2_d, this->output_d);\n      HANDLE_ERROR(cudaMemcpy(&this->output_gpu, this->output_d, sizeof(T), cudaMemcpyDeviceToHost));\n      std::string fail_gpu =\n          \" failed with \" + std::to_string(blocks) + \" blocks of \" + std::to_string(threads) + \" threads\";\n      ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_gpu, fail_gpu)) << fail_gpu;\n    }\n  }\n}\n\nTYPED_TEST(CudaFloatStructsTests, VecAddVecMultScalar)\n{\n  using T = TypeParam;\n  T ground_truth_output = this->addVec(this->input1_cpu, this->multScalar(this->input2_cpu, this->scalar));\n\n  this->output_cpu = this->input1_cpu + this->input2_cpu * this->scalar;\n  std::string cpu_fail = \"Doesn't pass cpu test\";\n  ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_cpu)) << cpu_fail;\n\n  // Parallelization tests\n  T zero = this->output_gpu;\n  for (int blocks = 1; blocks < 10; blocks++)\n  {\n    for (int threads = 1; threads < 128; threads++)\n    {\n      HANDLE_ERROR(cudaMemcpy(this->output_d, &zero, sizeof(T), cudaMemcpyHostToDevice));\n      VectorVectorScalarAddMultTestKernel<T, float>\n          <<<blocks, threads>>>(this->input1_d, this->input2_d, this->scalar_d, this->output_d);\n      HANDLE_ERROR(cudaMemcpy(&this->output_gpu, this->output_d, sizeof(T), cudaMemcpyDeviceToHost));\n      std::string fail_gpu =\n          \" failed with \" + std::to_string(blocks) + \" blocks of \" + std::to_string(threads) + \" threads\";\n      ASSERT_TRUE(this->assert_same(ground_truth_output, this->output_gpu, fail_gpu)) << fail_gpu;\n    }\n  }\n}\n"
  },
  {
    "path": "tests/math_utils/math_utils_test.cu",
    "content": "#include <array>\n\n#include <gtest/gtest.h>\n#include <mppi/utils/test_helper.h>\n#include <mppi/utils/math_utils.h>\n#include <mppi/utils/gpu_err_chk.cuh>\n#include <mppi/utils/cuda_math_utils.cuh>\n\nTEST(MATH_UTILS, SQ)\n{\n  EXPECT_FLOAT_EQ(0.25, SQ(0.5));\n  EXPECT_FLOAT_EQ(0.25, SQ(0.5 - 1.0));\n  EXPECT_FLOAT_EQ(25, SQ(5));\n  EXPECT_FLOAT_EQ(25, SQ(5 - 10));\n  EXPECT_FLOAT_EQ(625, SQ(5 * 5));\n}\n\nTEST(MATH_UTILS, QuatInv)\n{\n  // Create an unnormalized quaternion.\n  std::array<float, 4> q{ 1, 2, 3, 2 };\n  std::array<float, 4> q_inv{};\n\n  mppi::math::QuatInv(q.data(), q_inv.data());\n\n  // Compute the inverse using eigen.\n  // Don't use the array constructor because eigen stores it as [x, y, z, w] internally.\n  const Eigen::Quaternionf eigen_q{ q[0], q[1], q[2], q[3] };\n  const Eigen::Quaternionf eigen_q_inv = eigen_q.inverse().normalized();\n  std::array<float, 4> correct_q_inv{ eigen_q_inv.w(), eigen_q_inv.x(), eigen_q_inv.y(), eigen_q_inv.z() };\n\n  // q_inv should be normalized.\n  constexpr float tol = 1e-7;\n  array_assert_float_near<4>(q_inv, correct_q_inv, tol);\n}\n\nTEST(MATH_UTILS, RotatePointByDCM)\n{\n  const float tol = 1e-6;\n  Eigen::Quaternionf q_eig = Eigen::Quaternionf::UnitRandom();\n  Eigen::Vector3f point_eig = Eigen::Vector3f::Random();\n\n  std::array<float, 4> q{ q_eig.w(), q_eig.x(), q_eig.y(), q_eig.z() };\n  float3 point = make_float3(point_eig.x(), point_eig.y(), point_eig.z());\n\n  float M[3][3];\n  Eigen::Matrix3f M_eig;\n  mppi::math::Quat2DCM(q.data(), M);\n  mppi::math::Quat2DCM(q_eig, M_eig);\n  for (int row = 0; row < 3; row++)\n  {\n    for (int col = 0; col < 3; col++)\n    {\n      ASSERT_NEAR(M[row][col], M_eig(row, col), tol) << \" failed at row: \" << row << \", col: \" << col;\n    }\n  }\n\n  float3 result, result_eig_rotation_matrix;\n  Eigen::Vector3f result_eig;\n  mppi::math::RotatePointByDCM(M, point, result);\n  mppi::math::RotatePointByDCM(M_eig, point, 
result_eig_rotation_matrix);\n  mppi::math::RotatePointByDCM(M_eig, point_eig, result_eig);\n  ASSERT_NEAR(result.x, result_eig_rotation_matrix.x, tol) << \" failed comparing R stored in 2d float array and R in \"\n                                                              \"Eigen matrix\";\n  ASSERT_NEAR(result.y, result_eig_rotation_matrix.y, tol) << \" failed comparing R stored in 2d float array and R in \"\n                                                              \"Eigen matrix\";\n  ASSERT_NEAR(result.z, result_eig_rotation_matrix.z, tol) << \" failed comparing R stored in 2d float array and R in \"\n                                                              \"Eigen matrix\";\n  ASSERT_NEAR(result.x, result_eig.x(), tol) << \" failed comparing GPU and Eigen methods of rotating by DCM\";\n  ASSERT_NEAR(result.y, result_eig.y(), tol) << \" failed comparing GPU and Eigen methods of rotating by DCM\";\n  ASSERT_NEAR(result.z, result_eig.z(), tol) << \" failed comparing GPU and Eigen methods of rotating by DCM\";\n}\n\nTEST(MATH_UTILS, RotatePointByDCMTranspose)\n{\n  const float tol = 1e-6;\n  Eigen::Quaternionf q_eig = Eigen::Quaternionf::UnitRandom();\n  Eigen::Vector3f point_eig = Eigen::Vector3f::Random();\n\n  std::array<float, 4> q{ q_eig.w(), q_eig.x(), q_eig.y(), q_eig.z() };\n  float3 point = make_float3(point_eig.x(), point_eig.y(), point_eig.z());\n\n  float M[3][3];\n  Eigen::Matrix3f M_eig;\n  mppi::math::Quat2DCM(q.data(), M);\n  mppi::math::Quat2DCM(q_eig, M_eig);\n  for (int row = 0; row < 3; row++)\n  {\n    for (int col = 0; col < 3; col++)\n    {\n      ASSERT_NEAR(M[row][col], M_eig(row, col), tol) << \" failed at row: \" << row << \", col: \" << col;\n    }\n  }\n\n  float3 result, result_eig_rotation_matrix;\n  Eigen::Vector3f result_eig;\n  mppi::math::RotatePointByDCM(M, point, result, mppi::matrix_multiplication::MAT_OP::TRANSPOSE);\n  mppi::math::RotatePointByDCM(M_eig, point, result_eig_rotation_matrix,\n                       
        mppi::matrix_multiplication::MAT_OP::TRANSPOSE);\n  mppi::math::RotatePointByDCM(M_eig, point_eig, result_eig, mppi::matrix_multiplication::MAT_OP::TRANSPOSE);\n  ASSERT_NEAR(result.x, result_eig_rotation_matrix.x, tol) << \" failed comparing R stored in 2d float array and R in \"\n                                                              \"Eigen matrix\";\n  ASSERT_NEAR(result.y, result_eig_rotation_matrix.y, tol) << \" failed comparing R stored in 2d float array and R in \"\n                                                              \"Eigen matrix\";\n  ASSERT_NEAR(result.z, result_eig_rotation_matrix.z, tol) << \" failed comparing R stored in 2d float array and R in \"\n                                                              \"Eigen matrix\";\n  ASSERT_NEAR(result.x, result_eig.x(), tol) << \" failed comparing GPU and Eigen methods of rotating by DCM\";\n  ASSERT_NEAR(result.y, result_eig.y(), tol) << \" failed comparing GPU and Eigen methods of rotating by DCM\";\n  ASSERT_NEAR(result.z, result_eig.z(), tol) << \" failed comparing GPU and Eigen methods of rotating by DCM\";\n}\n\nTEST(MATH_UTILS, QuatMultiply)\n{\n  // Create an unnormalized quaternion.\n  std::array<float, 4> q1{ 1, 2, 3, 4 };\n  std::array<float, 4> q2{ 8, 7, 6, 5 };\n  std::array<float, 4> q3{};\n  std::array<float, 4> q3_norm{};\n  Eigen::Quaternionf eigen_result, eigen_result_norm;\n\n  // Compare the multiplication using eigen.\n  // Don't use the array constructor because eigen stores it as [x, y, z, w] internally.\n  const Eigen::Quaternionf eigen_q1{ q1[0], q1[1], q1[2], q1[3] };\n  const Eigen::Quaternionf eigen_q2{ q2[0], q2[1], q2[2], q2[3] };\n\n  // Ground Truth\n  const Eigen::Quaternionf eigen_q3 = (eigen_q1 * eigen_q2);\n  const Eigen::Quaternionf eigen_q3_norm = eigen_q3.normalized();\n\n  // Our Methods\n  mppi::math::QuatMultiply(q1.data(), q2.data(), q3.data(), false);\n  mppi::math::QuatMultiply(q1.data(), q2.data(), q3_norm.data());\n  
mppi::math::QuatMultiply(eigen_q1, eigen_q2, eigen_result, false);\n  mppi::math::QuatMultiply(eigen_q1, eigen_q2, eigen_result_norm);\n\n  std::array<float, 4> correct_q3{ eigen_q3.w(), eigen_q3.x(), eigen_q3.y(), eigen_q3.z() };\n  std::array<float, 4> eigen_q3_array{ eigen_result.w(), eigen_result.x(), eigen_result.y(), eigen_result.z() };\n  std::array<float, 4> correct_q3_norm{ eigen_q3_norm.w(), eigen_q3_norm.x(), eigen_q3_norm.y(), eigen_q3_norm.z() };\n  std::array<float, 4> eigen_q3_norm_array{ eigen_result_norm.w(), eigen_result_norm.x(), eigen_result_norm.y(),\n                                            eigen_result_norm.z() };\n\n  // q_inv should be normalized.\n  constexpr auto tol = 1e-7;\n  array_assert_float_near<4>(q3, correct_q3, tol);\n  array_assert_float_near<4>(eigen_q3_array, correct_q3, tol);\n  array_assert_float_near<4>(q3_norm, correct_q3_norm, tol);\n  array_assert_float_near<4>(eigen_q3_norm_array, correct_q3_norm, tol);\n}\n\nTEST(MATH_UTILS, RotatePointByQuat)\n{\n  for (int i = 0; i < 100; i++)\n  {\n    Eigen::Quaternionf rotation = Eigen::Quaternionf::UnitRandom();\n    Eigen::Vector3f translation = Eigen::Vector3f::Random();\n\n    float3 point = make_float3(translation.x(), translation.y(), translation.z());\n    float q[4] = { rotation.w(), rotation.x(), rotation.y(), rotation.z() };\n\n    float3 output, output_eig;\n    Eigen::Vector3f output_all_eig;\n\n    // Test method with 3 parameters, output going into the last\n    mppi::math::RotatePointByQuat(rotation, point, output_eig);\n    mppi::math::RotatePointByQuat(q, point, output);\n    mppi::math::RotatePointByQuat(rotation, translation, output_all_eig);\n\n    // Test method with 2 parameters, modifying the second\n    float3 output2 = point;\n    float3 output2_eig = point;\n    Eigen::Vector3f output2_all_eig = translation;\n    mppi::math::RotatePointByQuat(rotation, output2_eig);\n    mppi::math::RotatePointByQuat(q, output2);\n    
mppi::math::RotatePointByQuat(rotation, output2_all_eig);\n\n    // Ground Truth\n    auto result = rotation * translation;\n\n    EXPECT_NEAR(output.x, result.x(), 1.0e-5);\n    EXPECT_NEAR(output.y, result.y(), 1.0e-5);\n    EXPECT_NEAR(output.z, result.z(), 1.0e-5);\n    EXPECT_NEAR(output_eig.x, result.x(), 1.0e-5);\n    EXPECT_NEAR(output_eig.y, result.y(), 1.0e-5);\n    EXPECT_NEAR(output_eig.z, result.z(), 1.0e-5);\n    EXPECT_NEAR(output_all_eig.x(), result.x(), 1.0e-5);\n    EXPECT_NEAR(output_all_eig.y(), result.y(), 1.0e-5);\n    EXPECT_NEAR(output_all_eig.z(), result.z(), 1.0e-5);\n    EXPECT_NEAR(output2.x, result.x(), 1.0e-5);\n    EXPECT_NEAR(output2.y, result.y(), 1.0e-5);\n    EXPECT_NEAR(output2.z, result.z(), 1.0e-5);\n    EXPECT_NEAR(output2_eig.x, result.x(), 1.0e-5);\n    EXPECT_NEAR(output2_eig.y, result.y(), 1.0e-5);\n    EXPECT_NEAR(output2_eig.z, result.z(), 1.0e-5);\n    EXPECT_NEAR(output2_all_eig.x(), result.x(), 1.0e-5);\n    EXPECT_NEAR(output2_all_eig.y(), result.y(), 1.0e-5);\n    EXPECT_NEAR(output2_all_eig.z(), result.z(), 1.0e-5);\n  }\n}\n\nTEST(MATH_UTILS, QuatDCM)\n{\n  for (int iteration = 0; iteration < 100; iteration++)\n  {\n    Eigen::Quaternionf q_eig = Eigen::Quaternionf::UnitRandom();\n    float q[4] = { q_eig.w(), q_eig.x(), q_eig.y(), q_eig.z() };\n    float M[3][3];\n    Eigen::Matrix3f M_eig;\n    M_eig = q_eig.toRotationMatrix();\n    mppi::math::Quat2DCM(q, M);\n    for (int i = 0; i < 3; i++)\n    {\n      for (int j = 0; j < 3; j++)\n      {\n        EXPECT_NEAR(M_eig(i, j), M[i][j], 1.0e-5);\n      }\n    }\n  }\n}\n\nTEST(MATH_UTILS, EulerDCM)\n{\n  for (int iteration = 0; iteration < 100; iteration++)\n  {\n    Eigen::Quaternionf q_eig = Eigen::Quaternionf::UnitRandom();\n    float r, p, y;\n    float M[3][3];\n    Eigen::Matrix3f M_eig, M_result;\n    M_result = q_eig.toRotationMatrix();\n\n    mppi::math::Quat2EulerNWU(q_eig, r, p, y);\n    mppi::math::Euler2DCM_NWU(r, p, y, M);\n    
mppi::math::Euler2DCM_NWU(r, p, y, M_eig);\n    for (int i = 0; i < 3; i++)\n    {\n      for (int j = 0; j < 3; j++)\n      {\n        EXPECT_NEAR(M_result(i, j), M[i][j], 1.0e-5);\n        EXPECT_NEAR(M_result(i, j), M_eig(i, j), 1.0e-5);\n      }\n    }\n  }\n}\n\nTEST(MATH_UTILS, TranslateVectorByQuat)\n{\n  const float tol = 1e-6;\n  Eigen::Vector3f origin_eig = Eigen::Vector3f::Random();\n  Eigen::Vector3f body_pose_eig = Eigen::Vector3f::Random();\n  Eigen::Quaternionf rotation_eig = Eigen::Quaternionf::UnitRandom();\n\n  float3 origin_f3 = make_float3(origin_eig.x(), origin_eig.y(), origin_eig.z());\n  float3 body_pose_f3 = make_float3(body_pose_eig.x(), body_pose_eig.y(), body_pose_eig.z());\n  float rotation_array[4] = { rotation_eig.w(), rotation_eig.x(), rotation_eig.y(), rotation_eig.z() };\n  // Create output variables\n  float3 output_eig, output_array;\n  Eigen::Vector3f output_all_eig;\n\n  // Ground Truth\n  Eigen::Matrix3f R_eig = rotation_eig.toRotationMatrix();\n  Eigen::Vector3f correct_value = origin_eig + R_eig * body_pose_eig;\n\n  mppi::math::bodyOffsetToWorldPoseQuat(body_pose_f3, origin_f3, rotation_eig, output_eig);\n  mppi::math::bodyOffsetToWorldPoseQuat(body_pose_f3, origin_f3, rotation_array, output_array);\n  mppi::math::bodyOffsetToWorldPoseQuat(body_pose_eig, origin_eig, rotation_eig, output_all_eig);\n\n  EXPECT_NEAR(output_eig.x, correct_value.x(), tol);\n  EXPECT_NEAR(output_eig.y, correct_value.y(), tol);\n  EXPECT_NEAR(output_eig.z, correct_value.z(), tol);\n  EXPECT_NEAR(output_array.x, correct_value.x(), tol);\n  EXPECT_NEAR(output_array.y, correct_value.y(), tol);\n  EXPECT_NEAR(output_array.z, correct_value.z(), tol);\n  EXPECT_NEAR(output_all_eig.x(), correct_value.x(), tol);\n  EXPECT_NEAR(output_all_eig.y(), correct_value.y(), tol);\n  EXPECT_NEAR(output_all_eig.z(), correct_value.z(), tol);\n}\n\nTEST(MATH_UTILS, TranslateVectorByDCM)\n{\n  const float tol = 1e-6;\n  Eigen::Vector3f origin_eig = 
Eigen::Vector3f::Random();\n  Eigen::Vector3f body_pose_eig = Eigen::Vector3f::Random();\n  Eigen::Quaternionf rotation_eig = Eigen::Quaternionf::UnitRandom();\n\n  float3 origin_f3 = make_float3(origin_eig.x(), origin_eig.y(), origin_eig.z());\n  float3 body_pose_f3 = make_float3(body_pose_eig.x(), body_pose_eig.y(), body_pose_eig.z());\n  Eigen::Matrix3f R_eig = rotation_eig.toRotationMatrix();\n  float R_float[3][3];\n  for (int row = 0; row < 3; row++)\n  {\n    for (int col = 0; col < 3; col++)\n    {\n      R_float[row][col] = R_eig(row, col);\n    }\n  }\n  // Create output variables\n  float3 output_eig, output_array;\n  Eigen::Vector3f output_all_eig;\n\n  // Ground Truth\n  Eigen::Vector3f correct_value = origin_eig + R_eig * body_pose_eig;\n\n  // Our Methods\n  mppi::math::bodyOffsetToWorldPoseDCM(body_pose_f3, origin_f3, R_eig, output_eig);\n  mppi::math::bodyOffsetToWorldPoseDCM(body_pose_f3, origin_f3, R_float, output_array);\n  mppi::math::bodyOffsetToWorldPoseDCM(body_pose_eig, origin_eig, R_eig, output_all_eig);\n\n  EXPECT_NEAR(output_eig.x, correct_value.x(), tol);\n  EXPECT_NEAR(output_eig.y, correct_value.y(), tol);\n  EXPECT_NEAR(output_eig.z, correct_value.z(), tol);\n  EXPECT_NEAR(output_array.x, correct_value.x(), tol);\n  EXPECT_NEAR(output_array.y, correct_value.y(), tol);\n  EXPECT_NEAR(output_array.z, correct_value.z(), tol);\n  EXPECT_NEAR(output_all_eig.x(), correct_value.x(), tol);\n  EXPECT_NEAR(output_all_eig.y(), correct_value.y(), tol);\n  EXPECT_NEAR(output_all_eig.z(), correct_value.z(), tol);\n}\n\nTEST(MATH_UTILS, TranslateVectorByEuler)\n{\n  const float tol = 1e-6;\n  Eigen::Vector3f origin_eig = Eigen::Vector3f::Random();\n  Eigen::Vector3f body_pose_eig = Eigen::Vector3f::Random();\n  Eigen::Quaternionf q = Eigen::Quaternionf::UnitRandom();\n  Eigen::Matrix3f R_eig = q.toRotationMatrix();\n\n  float3 origin_f3 = make_float3(origin_eig.x(), origin_eig.y(), origin_eig.z());\n  float3 body_pose_f3 = 
make_float3(body_pose_eig.x(), body_pose_eig.y(), body_pose_eig.z());\n\n  float3 rotation;\n  Eigen::Vector3f rotation_eig;\n  mppi::math::Quat2EulerNWU(q, rotation.x, rotation.y, rotation.z);\n  rotation_eig << rotation.x, rotation.y, rotation.z;\n\n  // Create output variables\n  float3 output_eig, output_array;\n  Eigen::Vector3f output_all_eig;\n\n  // Ground Truth\n  Eigen::Vector3f correct_value = origin_eig + R_eig * body_pose_eig;\n\n  // Our Methods\n  mppi::math::bodyOffsetToWorldPoseEuler(body_pose_f3, origin_f3, rotation_eig, output_eig);\n  mppi::math::bodyOffsetToWorldPoseEuler(body_pose_f3, origin_f3, rotation, output_array);\n  mppi::math::bodyOffsetToWorldPoseEuler(body_pose_eig, origin_eig, rotation_eig, output_all_eig);\n\n  EXPECT_NEAR(output_eig.x, correct_value.x(), tol);\n  EXPECT_NEAR(output_eig.y, correct_value.y(), tol);\n  EXPECT_NEAR(output_eig.z, correct_value.z(), tol);\n  EXPECT_NEAR(output_array.x, correct_value.x(), tol);\n  EXPECT_NEAR(output_array.y, correct_value.y(), tol);\n  EXPECT_NEAR(output_array.z, correct_value.z(), tol);\n  EXPECT_NEAR(output_all_eig.x(), correct_value.x(), tol);\n  EXPECT_NEAR(output_all_eig.y(), correct_value.y(), tol);\n  EXPECT_NEAR(output_all_eig.z(), correct_value.z(), tol);\n}\n\nTEST(MATH_UTILS, SkewSymmetricMatrixSameAsCrossProd)\n{\n  Eigen::Vector3f a(1, 2, 3);\n  Eigen::Vector3f b(8, 3, 9);\n  eigen_assert_float_eq<Eigen::Vector3f>(a.cross(b), mppi::math::skewSymmetricMatrix(a) * b);\n}\n\nnamespace mm = mppi::matrix_multiplication;\nnamespace mp1 = mppi::p1;\nnamespace mp2 = mppi::p2;\n\ntemplate <int M, int K, int N, mp1::Parallel1Dir PARALLELIZATION_DIR, mm::MAT_OP A_OP = mm::MAT_OP::NONE,\n          mm::MAT_OP B_OP = mm::MAT_OP::NONE, class T = float>\n__global__ void gemm1d(T* A, T* B, T* C, T alpha = 1, T beta = 0, int shared_mem_size = 0)\n{\n  extern __shared__ float buff[];\n  T* A_p;\n  T* B_p;\n  if (shared_mem_size != 0)\n  {\n    A_p = (T*)&buff[0];\n    B_p = &A_p[M * K];\n    
mp1::loadArrayParallel<M * K, PARALLELIZATION_DIR, T>(A_p, 0, A, 0);\n    mp1::loadArrayParallel<K * N, PARALLELIZATION_DIR, T>((T*)&buff[0], M * K, B, 0);\n    __syncthreads();\n  }\n  else\n  {\n    A_p = A;\n    B_p = B;\n  }\n\n  mm::gemm1<M, K, N, PARALLELIZATION_DIR, T>(A_p, B_p, C, alpha, beta, A_OP, B_OP);\n}\n\ntemplate <int M, int K, int N>\n__global__ void gemm1dBatchKernel(const float* A, const float* B, float* C)\n{\n  const float* A_local = &A[M * K * threadIdx.x];\n  const float* B_local = &B[K * N * threadIdx.x];\n  float* C_local = &C[M * N * threadIdx.x];\n  mm::gemm1<M, K, N, mp1::Parallel1Dir::THREAD_Y>(A_local, B_local, C_local);\n}\n\ntemplate <int M, int K, int N, mm::MAT_OP A_OP, mm::MAT_OP B_OP, mp1::Parallel1Dir P_DIR, class T = float>\nfloat2 testMatMulHost(int size, T alpha = 1, T beta = 0)\n{\n  typedef Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> A_MAT;\n  typedef Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> B_MAT;\n  typedef Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> C_MAT;\n\n  A_MAT A;\n  B_MAT B;\n  C_MAT C_Eigen = C_MAT::Ones(M, N);\n  C_MAT C_ours = C_Eigen;\n  C_Eigen *= beta;\n  float eigen_time_ms = 0;\n\n  if (A_OP == mm::MAT_OP::NONE && B_OP == mm::MAT_OP::NONE)\n  {\n    A = A_MAT::Random(M, K);\n    B = B_MAT::Random(K, N);\n    auto start = std::chrono::steady_clock::now();\n    C_Eigen += A * B * alpha;\n    auto stop = std::chrono::steady_clock::now();\n    eigen_time_ms = mppi::math::timeDiffms(stop, start);\n  }\n  else if (A_OP == mm::MAT_OP::TRANSPOSE && B_OP == mm::MAT_OP::NONE)\n  {\n    A = A_MAT::Random(K, M);\n    B = B_MAT::Random(K, N);\n    auto start = std::chrono::steady_clock::now();\n    C_Eigen += A.transpose() * B * alpha;\n    auto stop = std::chrono::steady_clock::now();\n    eigen_time_ms = mppi::math::timeDiffms(stop, start);\n  }\n  if (A_OP == mm::MAT_OP::NONE && B_OP == mm::MAT_OP::TRANSPOSE)\n  {\n    A = A_MAT::Random(M, K);\n    B = B_MAT::Random(N, K);\n    auto start = 
std::chrono::steady_clock::now();\n    C_Eigen += A * B.transpose() * alpha;\n    auto stop = std::chrono::steady_clock::now();\n    eigen_time_ms = mppi::math::timeDiffms(stop, start);\n  }\n  else if (A_OP == mm::MAT_OP::TRANSPOSE && B_OP == mm::MAT_OP::TRANSPOSE)\n  {\n    A = A_MAT::Random(K, M);\n    B = B_MAT::Random(N, K);\n    auto start = std::chrono::steady_clock::now();\n    C_Eigen += A.transpose() * B.transpose() * alpha;\n    auto stop = std::chrono::steady_clock::now();\n    eigen_time_ms = mppi::math::timeDiffms(stop, start);\n  }\n\n  auto our_start = std::chrono::steady_clock::now();\n  mm::gemm1<M, K, N, P_DIR, T>(A.data(), B.data(), C_ours.data(), alpha, beta, A_OP, B_OP);\n  auto our_stop = std::chrono::steady_clock::now();\n  float ours_time_ms = mppi::math::timeDiffms(our_stop, our_start);\n  double avg_err = (C_Eigen - C_ours).norm() / C_Eigen.size();\n  EXPECT_TRUE(avg_err < 1e-7) << \"C_Eigen:\\n\" << C_Eigen << \"\\nC_ours:\\n\" << C_ours;\n  return make_float2(eigen_time_ms, ours_time_ms);\n}\n\ntemplate <int M, int K, int N, mm::MAT_OP A_OP, mm::MAT_OP B_OP, mp1::Parallel1Dir P_DIR, class T = float>\nfloat2 testMatMulp1(int size, T alpha = 1, T beta = 0)\n{\n  typedef Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> A_MAT;\n  typedef Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> B_MAT;\n  typedef Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> C_MAT;\n\n  cudaStream_t stream;\n  cudaStreamCreate(&stream);\n  cudaEvent_t start, stop;\n  cudaEventCreate(&start);\n  cudaEventCreate(&stop);\n  A_MAT A;\n  B_MAT B;\n  C_MAT C_CPU = C_MAT::Ones(M, N);\n  C_MAT C_GPU = C_CPU;\n  C_CPU *= beta;\n\n  // Device Memory Locations\n  T* A_d;\n  T* B_d;\n  T* C_d;\n  dim3 block(1, 1, 1);\n  dim3 grid(1, 1, 1);\n\n  HANDLE_ERROR(cudaMalloc((void**)&A_d, sizeof(T) * M * K));\n  HANDLE_ERROR(cudaMalloc((void**)&B_d, sizeof(T) * K * N));\n  HANDLE_ERROR(cudaMalloc((void**)&C_d, sizeof(T) * M * N));\n  float cpu_time_ms = 0;\n\n  if (A_OP == 
mm::MAT_OP::NONE && B_OP == mm::MAT_OP::NONE)\n  {\n    A = A_MAT::Random(M, K);\n    B = B_MAT::Random(K, N);\n    auto start = std::chrono::steady_clock::now();\n    C_CPU += A * B * alpha;\n    auto stop = std::chrono::steady_clock::now();\n    cpu_time_ms = mppi::math::timeDiffms(stop, start);\n  }\n  else if (A_OP == mm::MAT_OP::TRANSPOSE && B_OP == mm::MAT_OP::NONE)\n  {\n    A = A_MAT::Random(K, M);\n    B = B_MAT::Random(K, N);\n    auto start = std::chrono::steady_clock::now();\n    C_CPU += A.transpose() * B * alpha;\n    auto stop = std::chrono::steady_clock::now();\n    cpu_time_ms = mppi::math::timeDiffms(stop, start);\n  }\n  if (A_OP == mm::MAT_OP::NONE && B_OP == mm::MAT_OP::TRANSPOSE)\n  {\n    A = A_MAT::Random(M, K);\n    B = B_MAT::Random(N, K);\n    auto start = std::chrono::steady_clock::now();\n    C_CPU += A * B.transpose() * alpha;\n    auto stop = std::chrono::steady_clock::now();\n    cpu_time_ms = mppi::math::timeDiffms(stop, start);\n  }\n  else if (A_OP == mm::MAT_OP::TRANSPOSE && B_OP == mm::MAT_OP::TRANSPOSE)\n  {\n    A = A_MAT::Random(K, M);\n    B = B_MAT::Random(N, K);\n    auto start = std::chrono::steady_clock::now();\n    C_CPU += A.transpose() * B.transpose() * alpha;\n    auto stop = std::chrono::steady_clock::now();\n    cpu_time_ms = mppi::math::timeDiffms(stop, start);\n  }\n\n  int grid_dim = 1;\n\n  if (P_DIR == mp1::Parallel1Dir::THREAD_X)\n  {\n    block.x = size;\n  }\n  else if (P_DIR == mp1::Parallel1Dir::THREAD_Y)\n  {\n    block.y = size;\n  }\n  else if (P_DIR == mp1::Parallel1Dir::THREAD_Z)\n  {\n    block.z = size;\n  }\n  else if (P_DIR == mp1::Parallel1Dir::GLOBAL_X)\n  {\n    grid_dim = (M * N - 1) / (size) + 1;\n    block.x = size;\n    grid.x = grid_dim;\n  }\n  else if (P_DIR == mp1::Parallel1Dir::GLOBAL_Y)\n  {\n    grid_dim = (M * N - 1) / (size) + 1;\n    block.y = size;\n    grid.y = grid_dim;\n  }\n  else if (P_DIR == mp1::Parallel1Dir::GLOBAL_Z)\n  {\n    grid_dim = (M * N - 1) / (size) + 1;\n    
block.y = size;\n    grid.y = grid_dim;\n  }\n  int shared_mem_num = ((M * K + K * N) * sizeof(T) <= 1 << 16) ? (M * K + K * N) : 0;\n\n  HANDLE_ERROR(cudaMemcpyAsync(A_d, A.data(), sizeof(T) * M * K, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(B_d, B.data(), sizeof(T) * K * N, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(C_d, C_GPU.data(), sizeof(T) * M * N, cudaMemcpyHostToDevice, stream));\n  cudaEventRecord(start, stream);\n  gemm1d<M, K, N, P_DIR, A_OP, B_OP, T>\n      <<<grid, block, shared_mem_num * sizeof(T), stream>>>(A_d, B_d, C_d, alpha, beta, shared_mem_num);\n  cudaEventRecord(stop, stream);\n  HANDLE_ERROR(cudaMemcpyAsync(C_GPU.data(), C_d, sizeof(T) * M * N, cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n  HANDLE_ERROR(cudaEventSynchronize(stop));\n\n  float gpu_time_ms = 0;\n  HANDLE_ERROR(cudaEventElapsedTime(&gpu_time_ms, start, stop));\n\n  double avg_err = (C_CPU - C_GPU).norm() / C_CPU.size();\n  EXPECT_TRUE(avg_err < 1e-7) << \"C_CPU:\\n\" << C_CPU << \"\\nC_GPU:\\n\" << C_GPU;\n  cudaFree(A_d);\n  cudaFree(B_d);\n  cudaFree(C_d);\n  return make_float2(cpu_time_ms, gpu_time_ms);\n};\n\nTEST(MATH_UTILS, float_BasicMatrixMult_x)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n  testMatMulp1<10, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n  testMatMulp1<10, 2, 50, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(256, 1, 0);\n}\n\nTEST(MATH_UTILS, float_BasicMatrixMult_y)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_Y, float>(16, 1, 0);\n  testMatMulp1<10, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_Y, float>(16, 1, 0);\n  testMatMulp1<10, 2, 50, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_Y, float>(256, 1, 0);\n}\n\nTEST(MATH_UTILS, 
float_BasicMatrixMult_z)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_Z, float>(16, 1, 0);\n  testMatMulp1<10, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_Z, float>(16, 1, 0);\n  testMatMulp1<10, 2, 50, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_Z, float>(64, 1, 0);\n}\n\nTEST(MATH_UTILS, float_BasicMatrixMult_none)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::NONE, float>(16, 1, 0);\n  testMatMulp1<10, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::NONE, float>(16, 1, 0);\n  testMatMulp1<10, 2, 50, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::NONE, float>(256, 1, 0);\n}\n\nTEST(MATH_UTILS, float_TransposeA_MatrixMult_x)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::TRANSPOSE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n}\n\nTEST(MATH_UTILS, float_TransposeA_TransposeB_MatrixMult_x)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::TRANSPOSE, mm::MAT_OP::TRANSPOSE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n}\n\nTEST(MATH_UTILS, float_TransposeB_MatrixMult_x)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::TRANSPOSE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n}\n\nTEST(MATH_UTILS, int_BasicMatrixMult_x)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_Z, int>(16, 1, 0);\n}\n\nTEST(MATH_UTILS, double_BasicMatrixMult_x)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_Z, double>(16, 1, 0);\n}\n\nTEST(MATH_UTILS, float_MatrixMult_UnalignedK_x)\n{\n  testMatMulp1<3, 17, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n  testMatMulp1<3, 17, 5, mm::MAT_OP::TRANSPOSE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n  testMatMulp1<3, 17, 5, mm::MAT_OP::NONE, mm::MAT_OP::TRANSPOSE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n  testMatMulp1<3, 1, 4, 
mm::MAT_OP::TRANSPOSE, mm::MAT_OP::TRANSPOSE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n}\n\nTEST(MATH_UTILS, float_MatrixMult_alignedK2_x)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n  testMatMulp1<3, 2, 5, mm::MAT_OP::TRANSPOSE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::TRANSPOSE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n  testMatMulp1<3, 2, 5, mm::MAT_OP::TRANSPOSE, mm::MAT_OP::TRANSPOSE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n}\n\nTEST(MATH_UTILS, float_MatrixMult_alignedK4_x)\n{\n  testMatMulp1<3, 4, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n  testMatMulp1<3, 4, 5, mm::MAT_OP::TRANSPOSE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n  testMatMulp1<3, 4, 5, mm::MAT_OP::NONE, mm::MAT_OP::TRANSPOSE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n  testMatMulp1<3, 4, 5, mm::MAT_OP::TRANSPOSE, mm::MAT_OP::TRANSPOSE, mp1::Parallel1Dir::THREAD_X, float>(16, 1, 0);\n}\n\nTEST(MATH_UTILS, float_BasicMatrixMult_alpha_z)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_Z, float>(16, 3.0, 0);\n}\n\nTEST(MATH_UTILS, float_BasicMatrixMult_alpha_beta_z)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_Z, float>(16, 3.0, 0.2);\n}\n\nTEST(MATH_UTILS, float_BasicMatrixMult_alpha_beta_x)\n{\n  testMatMulp1<3, 2, 5, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(5, 3.0, 0.2);\n}\n\nTEST(MATH_UTILS, HostMatrixMult)\n{\n  const int M = 10;\n  const int K = 5;\n  const int N = 6;\n  const mm::MAT_OP A_OP = mm::MAT_OP::NONE;\n  const mm::MAT_OP B_OP = mm::MAT_OP::NONE;\n  const mp1::Parallel1Dir P_DIR = mp1::Parallel1Dir::THREAD_X;\n  using T = float;\n  T alpha = 1.0;\n  T beta = 0.0;\n  testMatMulHost<M, K, N, A_OP, B_OP, P_DIR, T>(alpha, 
beta);\n}\nTEST(MATH_UTILS, SpeedSharedMemMatrixMult)\n{\n  const int NUM_ITERATIONS = 100;\n  float2 total_ms = make_float2(0, 0);\n  const int thread_dim = 1024;\n  for (int i = 0; i < NUM_ITERATIONS; i++)\n  {\n    total_ms +=\n        testMatMulp1<128, 256, 128, mm::MAT_OP::TRANSPOSE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(\n            thread_dim, 3.0, 0.2);\n  }\n  std::cout << NUM_ITERATIONS << \" GPU Shared Mem Matrix Multiplications took \" << total_ms.y / NUM_ITERATIONS\n            << \" ms on average\" << std::endl;\n  std::cout << NUM_ITERATIONS << \" CPU Shared Mem Matrix Multiplications took \" << total_ms.x / NUM_ITERATIONS\n            << \" ms on average\" << std::endl;\n}\n\nTEST(MATH_UTILS, SpeedLargeMatrixMult)\n{\n  const int NUM_ITERATIONS = 5;\n  float2 total_ms = make_float2(0, 0);\n  const int thread_dim = 1024;\n  for (int i = 0; i < NUM_ITERATIONS; i++)\n  {\n    total_ms +=\n        testMatMulp1<1024, 2048, 1024, mm::MAT_OP::TRANSPOSE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(\n            thread_dim, 3.0, 0.2);\n  }\n  std::cout << NUM_ITERATIONS << \" GPU Matrix Multiplications took \" << total_ms.y / NUM_ITERATIONS << \" ms on average\"\n            << std::endl;\n  std::cout << NUM_ITERATIONS << \" CPU Matrix Multiplications took \" << total_ms.x / NUM_ITERATIONS << \" ms on average\"\n            << std::endl;\n}\n\nTEST(MATH_UTILS, SpeedLargeMatrixMultGlobal)\n{\n  const int NUM_ITERATIONS = 10;\n  float2 total_ms = make_float2(0, 0);\n  const int thread_dim = 1024;\n  for (int i = 0; i < NUM_ITERATIONS; i++)\n  {\n    total_ms +=\n        testMatMulp1<1024, 2048, 1024, mm::MAT_OP::TRANSPOSE, mm::MAT_OP::NONE, mp1::Parallel1Dir::GLOBAL_X, float>(\n            thread_dim, 3.0, 0.2);\n  }\n  std::cout << NUM_ITERATIONS << \" GPU Matrix Multiplications took \" << total_ms.y / NUM_ITERATIONS << \" ms on average\"\n            << std::endl;\n  std::cout << NUM_ITERATIONS << \" CPU Matrix Multiplications took 
\" << total_ms.x / NUM_ITERATIONS << \" ms on average\"\n            << std::endl;\n}\n\nTEST(MATH_UTILS, SpeedSmallMatrixMult)\n{\n  const int NUM_ITERATIONS = 10;\n  float2 total_ms = make_float2(0, 0);\n  const int thread_dim = 16;\n  for (int i = 0; i < NUM_ITERATIONS; i++)\n  {\n    total_ms += testMatMulp1<5, 4, 3, mm::MAT_OP::NONE, mm::MAT_OP::NONE, mp1::Parallel1Dir::THREAD_X, float>(\n        thread_dim, 3.0, 0.2);\n  }\n  std::cout << NUM_ITERATIONS << \" GPU Matrix Multiplications took \" << total_ms.y / NUM_ITERATIONS << \" ms on average\"\n            << std::endl;\n  std::cout << NUM_ITERATIONS << \" CPU Matrix Multiplications took \" << total_ms.x / NUM_ITERATIONS << \" ms on average\"\n            << std::endl;\n}\n\ntemplate <int M, int N, mp1::Parallel1Dir PARALLELIZATION_DIR = mp1::Parallel1Dir::THREAD_Y, class T = float>\n__global__ void GaussJordanKernel(T* A_d)\n{\n  int batch_idx;\n  if (PARALLELIZATION_DIR == mp1::Parallel1Dir::THREAD_X)\n  {\n    batch_idx = blockIdx.x;\n  }\n  else if (PARALLELIZATION_DIR == mp1::Parallel1Dir::THREAD_Y)\n  {\n    batch_idx = blockIdx.y;\n  }\n  else if (PARALLELIZATION_DIR == mp1::Parallel1Dir::THREAD_Z)\n  {\n    batch_idx = blockIdx.z;\n  }\n  extern __shared__ float A_shared_s[];\n  T* A_shared = (T*)A_shared_s;\n  mp1::loadArrayParallel<M * N, PARALLELIZATION_DIR, T>(A_shared, 0, A_d + batch_idx * M * N, 0);\n  __syncthreads();\n  mm::GaussJordanElimination<M, N, PARALLELIZATION_DIR, T>(A_shared);\n  __syncthreads();\n  mp1::loadArrayParallel<M * N, PARALLELIZATION_DIR, T>(A_d + batch_idx * M * N, 0, A_shared, 0);\n}\n\nTEST(MATH_UTILS, GaussJordanFactorizationAgainstInverse)\n{\n  cudaStream_t stream;\n  cudaStreamCreate(&stream);\n  const int M = 3;\n  const int N = 2 * M;\n  Eigen::MatrixXf A = Eigen::MatrixXf::Zero(M, N);\n  Eigen::MatrixXf A_result = A;\n  Eigen::MatrixXf first_A;\n  Eigen::MatrixXf A_inv;\n  dim3 block_size(1, 1, 1);\n  dim3 grid_size(1, 1, 1);\n  int shared_mem_size = 
sizeof(float) * M * N;\n  float* A_d;\n  cudaMalloc((void**)&A_d, sizeof(float) * M * N);\n\n  float max_error = 0.0f;\n  Eigen::MatrixXf diff;\n  first_A = Eigen::MatrixXf::Random(M, M);\n  A_inv = first_A.inverse();\n  for (int tdx = 1; tdx < 64; tdx++)\n  {\n    A << first_A, Eigen::MatrixXf::Identity(M, M);\n    cudaMemcpyAsync(A_d, A.data(), sizeof(float) * M * N, cudaMemcpyHostToDevice, stream);\n    block_size.x = tdx;\n    GaussJordanKernel<M, N, mp1::Parallel1Dir::THREAD_X><<<grid_size, block_size, shared_mem_size, stream>>>(A_d);\n    cudaMemcpyAsync(A_result.data(), A_d, sizeof(float) * M * N, cudaMemcpyDeviceToHost, stream);\n    cudaStreamSynchronize(stream);\n\n    diff = A_result.block(0, M, M, N - M) - A_inv;\n    max_error = diff.norm();\n    ASSERT_NEAR(max_error, 0.0f, sqrtf(1e-4 * M * N)) << \"failed for \" << tdx << \" parallel x threads.\\nDiff:\\n\"\n                                                      << diff << \"\\nCPU:\\n\"\n                                                      << A_inv << \"\\nGPU:\\n\"\n                                                      << A_result.block(0, M, M, N - M);\n  }\n  block_size = dim3(1, 1, 1);\n  for (int tdy = 1; tdy < 64; tdy++)\n  {\n    // first_A = Eigen::MatrixXf::Random(M, M);\n    A << first_A, Eigen::MatrixXf::Identity(M, M);\n    // A_inv = first_A.inverse();\n    cudaMemcpyAsync(A_d, A.data(), sizeof(float) * M * N, cudaMemcpyHostToDevice, stream);\n    block_size.y = tdy;\n    GaussJordanKernel<M, N, mp1::Parallel1Dir::THREAD_Y><<<grid_size, block_size, shared_mem_size, stream>>>(A_d);\n    cudaMemcpyAsync(A_result.data(), A_d, sizeof(float) * M * N, cudaMemcpyDeviceToHost, stream);\n    cudaStreamSynchronize(stream);\n    diff = A_result.block(0, M, M, N - M) - A_inv;\n    max_error = diff.lpNorm<Eigen::Infinity>();\n    ASSERT_NEAR(max_error, 0.0f, 1e-4 * A_inv.norm()) << \"failed for \" << tdy << \" parallel y threads.\\nDiff:\\n\"\n                                                      << 
diff << \"\\nCPU:\\n\"\n                                                      << A_inv << \"\\nGPU:\\n\"\n                                                      << A_result.block(0, M, M, N - M);\n  }\n  block_size = dim3(1, 1, 1);\n  for (int tdz = 1; tdz < 64; tdz++)\n  {\n    // first_A = Eigen::MatrixXf::Random(M, M);\n    A << first_A, Eigen::MatrixXf::Identity(M, M);\n    // A_inv = first_A.inverse();\n    cudaMemcpyAsync(A_d, A.data(), sizeof(float) * M * N, cudaMemcpyHostToDevice, stream);\n    block_size.z = tdz;\n    GaussJordanKernel<M, N, mp1::Parallel1Dir::THREAD_Z><<<grid_size, block_size, shared_mem_size, stream>>>(A_d);\n    cudaMemcpyAsync(A_result.data(), A_d, sizeof(float) * M * N, cudaMemcpyDeviceToHost, stream);\n    cudaStreamSynchronize(stream);\n    diff = A_result.block(0, M, M, N - M) - A_inv;\n    max_error = diff.lpNorm<Eigen::Infinity>();\n    ASSERT_NEAR(max_error, 0.0f, 1e-4 * A_inv.norm()) << \"failed for \" << tdz << \" parallel z threads.\\nDiff:\\n\"\n                                                      << diff << \"\\nCPU:\\n\"\n                                                      << A_inv << \"\\nGPU:\\n\"\n                                                      << A_result.block(0, M, M, N - M);\n  }\n  cudaFree(A_d);\n}\n\nTEST(MATH_UTILS, GaussJordanEliminationColumnSkip)\n{\n  cudaStream_t stream;\n  cudaStreamCreate(&stream);\n  const int M = 2;\n  const int N = 5;\n  Eigen::MatrixXf A = Eigen::MatrixXf::Zero(M, N);\n  Eigen::MatrixXf A_result = A;\n  // A << 0, 2, 3, 1, 1, 4;\n  A << 1, 2, 2, 3, 4, 2, 4, 4, 1, 8;\n\n  Eigen::MatrixXf b_d = Eigen::MatrixXf::Zero(M, N - M);\n  // b_d << 2.5, 1.5;\n  b_d << 2, 0, 4, 0, 1, 0;\n  dim3 block_size(1, 1, 1);\n  dim3 grid_size(1, 1, 1);\n  int shared_mem_size = sizeof(float) * M * N;\n  float* A_d;\n  cudaMalloc((void**)&A_d, sizeof(float) * M * N);\n\n  float max_error = 0.0f;\n  Eigen::MatrixXf diff;\n  for (int tdx = 1; tdx < 65; tdx++)\n  {\n    cudaMemcpyAsync(A_d, A.data(), 
sizeof(float) * M * N, cudaMemcpyHostToDevice, stream);\n    block_size.x = tdx;\n    GaussJordanKernel<M, N, mp1::Parallel1Dir::THREAD_X><<<grid_size, block_size, shared_mem_size, stream>>>(A_d);\n    cudaMemcpyAsync(A_result.data(), A_d, sizeof(float) * M * N, cudaMemcpyDeviceToHost, stream);\n    cudaStreamSynchronize(stream);\n\n    diff = A_result.block(0, M, M, N - M) - b_d;\n    max_error = diff.norm();\n    ASSERT_NEAR(max_error, 0.0f, 1e-4 * M * N) << \"failed for \" << tdx << \" parallel x threads.\\nDiff:\\n\"\n                                               << diff << \"\\nCPU:\\n\"\n                                               << b_d << \"\\nGPU:\\n\"\n                                               << A_result.block(0, M, M, N - M);\n  }\n  cudaFree(A_d);\n}\n\nTEST(MATH_UTILS, GaussJordanEliminationRowSwap)\n{\n  cudaStream_t stream;\n  cudaStreamCreate(&stream);\n  const int M = 2;\n  const int N = 3;\n  Eigen::MatrixXf A = Eigen::MatrixXf::Zero(M, N);\n  Eigen::MatrixXf A_result = A;\n  A << 0, 2, 3, 1, 1, 4;\n\n  Eigen::MatrixXf b_d = Eigen::MatrixXf::Zero(M, N - M);\n  b_d << 2.5, 1.5;\n  dim3 block_size(1, 1, 1);\n  dim3 grid_size(1, 1, 1);\n  int shared_mem_size = sizeof(float) * M * N;\n  float* A_d;\n  cudaMalloc((void**)&A_d, sizeof(float) * M * N);\n\n  float max_error = 0.0f;\n  Eigen::MatrixXf diff;\n  for (int tdx = 1; tdx < 65; tdx++)\n  {\n    cudaMemcpyAsync(A_d, A.data(), sizeof(float) * M * N, cudaMemcpyHostToDevice, stream);\n    block_size.x = tdx;\n    GaussJordanKernel<M, N, mp1::Parallel1Dir::THREAD_X><<<grid_size, block_size, shared_mem_size, stream>>>(A_d);\n    cudaMemcpyAsync(A_result.data(), A_d, sizeof(float) * M * N, cudaMemcpyDeviceToHost, stream);\n    cudaStreamSynchronize(stream);\n\n    diff = A_result.block(0, M, M, N - M) - b_d;\n    max_error = diff.norm();\n    ASSERT_NEAR(max_error, 0.0f, 1e-4 * M * N) << \"failed for \" << tdx << \" parallel x threads.\\nDiff:\\n\"\n                                             
  << diff << \"\\nCPU:\\n\"\n                                               << b_d << \"\\nGPU:\\n\"\n                                               << A_result.block(0, M, M, N - M);\n  }\n  cudaFree(A_d);\n}\n\nTEST(MATH_UTILS, GaussJordanFactorizationBatched)\n{\n  cudaStream_t stream;\n  cudaStreamCreate(&stream);\n  const int M = 3;\n  const int N = 2 * M;\n  dim3 block_size(1, 1, 1);\n  dim3 grid_size(1, 1, 1);\n  int shared_mem_size = sizeof(double) * M * N;\n  double* A_d = nullptr;\n  // cudaMalloc((void**)&A_d, sizeof(double) * M * N);\n  for (int batches = 1; batches < 1000; batches++)\n  {\n    std::vector<double> A_batch(batches * M * N);\n    std::vector<double> A_cpu(batches * M * N);\n    std::vector<double> A_gpu(batches * M * N);\n    if (A_d)\n    {\n      cudaFree(A_d);\n    }\n    cudaMalloc((void**)&A_d, sizeof(double) * batches * M * N);\n\n    grid_size.x = batches;\n    // Fill in A_batch\n    for (int i = 0; i < batches; i++)\n    {\n      Eigen::Map<Eigen::MatrixXd> A_batch_i(&A_batch.data()[i * M * N], M, N);\n      Eigen::Map<Eigen::MatrixXd> A_cpu_i(&A_cpu.data()[i * M * N], M, N);\n      A_batch_i << Eigen::MatrixXd::Random(M, M) + Eigen::MatrixXd::Identity(M, M), Eigen::MatrixXd::Identity(M, M);\n      A_cpu_i << Eigen::MatrixXd::Identity(M, M), A_batch_i.block(0, 0, M, M).inverse();\n    }\n\n    for (int tdx = 1; tdx < 128; tdx++)\n    {\n      // Copy over to GPU and run GJ Elimination\n      cudaMemcpyAsync(A_d, A_batch.data(), sizeof(double) * batches * M * N, cudaMemcpyHostToDevice, stream);\n      block_size.x = tdx;\n      GaussJordanKernel<M, N, mp1::Parallel1Dir::THREAD_X><<<grid_size, block_size, shared_mem_size, stream>>>(A_d);\n      cudaMemcpyAsync(A_gpu.data(), A_d, sizeof(double) * batches * M * N, cudaMemcpyDeviceToHost, stream);\n      cudaStreamSynchronize(stream);\n      for (int i = 0; i < A_gpu.size(); i++)\n      {\n        // double tolerance = fabsf(A_cpu[i]) < 1 ? 
1e-3 : 1e-3 * fabsf(A_cpu[i]);\n        double tolerance = 0.1f;\n        ASSERT_LT(fabsf(A_gpu[i] - A_cpu[i]), tolerance)\n            << i % (M * N) << \"th item in \"\n            << \" batch: \" << i / (M * N) << \" out of \" << batches << \", CPU: \" << A_cpu[i] << \", GPU: \" << A_gpu[i];\n      }\n    }\n  }\n  cudaFree(A_d);\n}\n\n/** 2 Dimensional Parallelization Tests **/\n\ntemplate <int M, int K, int N, mp2::Parallel2Dir PARALLELIZATION_DIR>\n__global__ void gemm2d(const float* A, const float* B, float* C)\n{\n  mm::gemm2<M, K, N, PARALLELIZATION_DIR>(A, B, C);\n}\n\nTEST(MATH_UTILS, DISABLED_Matrix3fMultiplicationP2)\n{\n  const int M = 300;\n  const int K = 30;\n  const int N = 3;\n  const int max_thread_size = 1;\n  typedef Eigen::Matrix<float, M, K> A_mat;\n  typedef Eigen::Matrix<float, K, N> B_mat;\n  typedef Eigen::Matrix<float, M, N> C_mat;\n\n  cudaEvent_t start;\n  cudaEvent_t stop;\n  cudaEventCreate(&start);\n  cudaEventCreate(&stop);\n  cudaStream_t stream;\n  cudaStreamCreate(&stream);\n\n  A_mat A = A_mat::Random();\n  B_mat B = B_mat::Random();\n  C_mat C_CPU = A * B;\n  C_mat C_GPU;\n  // std::cout << \"C_CPU:\\n\" << C_CPU << std::endl;\n  float* A_d;\n  float* B_d;\n  float* C_d;\n  dim3 block(1, 1, 1);\n  dim3 grid(1, 1, 1);\n\n  HANDLE_ERROR(cudaMalloc((void**)&A_d, sizeof(float) * A.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&B_d, sizeof(float) * B.size()));\n  HANDLE_ERROR(cudaMalloc((void**)&C_d, sizeof(float) * C_CPU.size()));\n\n  auto inner_loop_before = [&](int size1, int size2, mp2::Parallel2Dir P_DIR) {\n    A = A_mat::Random();\n    B = B_mat::Random();\n    C_CPU = A * B;\n    block = dim3(1, 1, 1);\n    if (P_DIR == mp2::Parallel2Dir::THREAD_XY)\n    {\n      block.x = size1;\n      block.y = max(size2, 1);\n    }\n    else if (P_DIR == mp2::Parallel2Dir::THREAD_XZ)\n    {\n      block.x = size1;\n      block.z = max(min(size2, 64), 1);\n    }\n    else if (P_DIR == mp2::Parallel2Dir::THREAD_YX)\n    {\n      block.y = 
size1;\n      block.x = max(size2, 1);\n    }\n    else if (P_DIR == mp2::Parallel2Dir::THREAD_YZ)\n    {\n      block.y = size1;\n      block.z = max(min(size2, 64), 1);\n    }\n    else if (P_DIR == mp2::Parallel2Dir::THREAD_ZY)\n    {\n      block.z = max(min(size1, 64), 1);\n      block.y = max(size2, 1);\n    }\n    else if (P_DIR == mp2::Parallel2Dir::THREAD_ZX)\n    {\n      block.z = max(min(size1, 64), 1);\n      block.x = max(size2, 1);\n    }\n    cudaEventRecord(start, stream);\n    HANDLE_ERROR(cudaMemcpyAsync(A_d, A.data(), sizeof(float) * A.size(), cudaMemcpyHostToDevice, stream));\n    HANDLE_ERROR(cudaMemcpyAsync(B_d, B.data(), sizeof(float) * B.size(), cudaMemcpyHostToDevice, stream));\n  };\n\n  auto inner_loop_after = [&](int size1, int size2, mp2::Parallel2Dir P_DIR) {\n    HANDLE_ERROR(cudaMemcpyAsync(C_GPU.data(), C_d, sizeof(float) * C_GPU.size(), cudaMemcpyDeviceToHost, stream));\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n    cudaEventRecord(stop, stream);\n    float duration = 0;\n    cudaEventElapsedTime(&duration, start, stop);\n    // std::cout << \"C_GPU \" << \" with \" << block_x_size << \" parallelization:\\n\" << C_GPU << std::endl;\n    // std::cout << block_x_size << \" parallelization diff \" << duration << \" ms:\\n\" << C_CPU - C_GPU << std::endl;\n    double avg_err = (C_CPU - C_GPU).norm() / C_CPU.size();\n    // std::cout << block_x_size << \" parallelization diff \" << duration << \" ms: \" << avg_err << std::endl;\n    // std::string dir_s = \"none\";\n    // if (P_DIR == mp2::Parallel2Dir::THREAD_XY)\n    // {\n    //   dir_s = \"xy\";\n    // } else if (P_DIR == mp2::Parallel2Dir::THREAD_XZ)\n    // {\n    //   dir_s = \"xz\";\n    // } else if (P_DIR == mp2::Parallel2Dir::THREAD_YX)\n    // {\n    //   dir_s = \"yx\";\n    // } else if (P_DIR == mp2::Parallel2Dir::THREAD_YZ)\n    // {\n    //   dir_s = \"yz\";\n    // } else if (P_DIR == mp2::Parallel2Dir::THREAD_ZY)\n    // {\n    //   dir_s = \"zy\";\n    // 
} else if (P_DIR == mp2::Parallel2Dir::THREAD_ZX)\n    // {\n    //   dir_s = \"zx\";\n    // }\n    // printf(\"%d-%d-%d parallelization in %s-dir, time elapsed %f ms, diff: %f\\n\", block.x, block.y, block.z,\n    // dir_s.c_str(), duration, avg_err);\n    printf(\"%d-%d-%d parallelization, time elapsed %f ms, diff: %f\\n\", block.x, block.y, block.z, duration, avg_err);\n    ASSERT_TRUE(avg_err < 1e-07);\n  };\n\n  for (int block_size_1 = 1; block_size_1 <= max_thread_size; block_size_1++)\n  {\n    int block_size_2 = max_thread_size - block_size_1;\n    inner_loop_before(block_size_1, block_size_2, mp2::Parallel2Dir::THREAD_XY);\n    gemm2d<M, K, N, mp2::Parallel2Dir::THREAD_XY><<<grid, block, 0, stream>>>(A_d, B_d, C_d);\n    inner_loop_after(block_size_1, block_size_2, mp2::Parallel2Dir::THREAD_XY);\n  }\n  for (int block_size_1 = 1; block_size_1 <= max_thread_size; block_size_1++)\n  {\n    int block_size_2 = max_thread_size - block_size_1;\n    inner_loop_before(block_size_1, block_size_2, mp2::Parallel2Dir::THREAD_XZ);\n    gemm2d<M, K, N, mp2::Parallel2Dir::THREAD_XZ><<<grid, block, 0, stream>>>(A_d, B_d, C_d);\n    inner_loop_after(block_size_1, block_size_2, mp2::Parallel2Dir::THREAD_XZ);\n  }\n  for (int block_size_1 = 1; block_size_1 <= max_thread_size; block_size_1++)\n  {\n    int block_size_2 = max_thread_size - block_size_1;\n    inner_loop_before(block_size_1, block_size_2, mp2::Parallel2Dir::THREAD_YZ);\n    gemm2d<M, K, N, mp2::Parallel2Dir::THREAD_YZ><<<grid, block, 0, stream>>>(A_d, B_d, C_d);\n\n    inner_loop_after(block_size_1, block_size_2, mp2::Parallel2Dir::THREAD_YZ);\n    // std::cout << \"C_GPU:\\n\" << C_GPU << \"\\nC_CPU:\\n\" << C_CPU << std::endl;\n  }\n\n  cudaFree(A_d);\n  cudaFree(B_d);\n  cudaFree(C_d);\n}\n"
  },
  {
    "path": "tests/misc/CMakeLists.txt",
    "content": "# Double integrator dynamics tests\nset(TARGET_NAME miscellaneous_tests)\n\nadd_executable(${TARGET_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp miscellaneous_tests.cu)\ntarget_link_libraries(${TARGET_NAME} gtest gmock gtest_main double_integrator_mppi)\nset_target_properties(${TARGET_NAME} PROPERTIES FOLDER test)\n\ngtest_add_tests(TARGET miscellaneous_tests)\n\n"
  },
  {
    "path": "tests/misc/di_dynamics_kernel_tests.cu",
    "content": "#include \"di_dynamics_kernel_tests.cuh\"\n\n__global__ void CheckModelSize(DoubleIntegratorDynamics* DI, long* model_size_check)\n{\n  model_size_check[0] = sizeof(*DI);\n}\n"
  },
  {
    "path": "tests/misc/di_dynamics_kernel_tests.cuh",
    "content": "#pragma once\n\n#ifndef KERNEL_TESTS_DYNAMICS_DOUBLE_INTEGRATOR_DI_KERNEL_TEST_CUH_\n#define KERNEL_TESTS_DYNAMICS_DOUBLE_INTEGRATOR_DI_KERNEL_TEST_CUH_\n\n#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n\n__global__ void CheckModelSize(DoubleIntegratorDynamics* DI, long* model_size_check);\n\n#include \"di_dynamics_kernel_tests.cu\"\n\n#endif  //! KERNEL_TESTS_DYNAMICS_CARTPOLE_CARTPOLE_KERNEL_TEST_CUH_"
  },
  {
    "path": "tests/misc/miscellaneous_tests.cu",
    "content": "//\n// Created by mgandhi3 on 3/4/20.\n//\n#include <gtest/gtest.h>\n#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n#include \"di_dynamics_kernel_tests.cuh\"\n#include <memory>\n#include <random>\n#include <Eigen/Dense>\n\nTEST(Miscellaneous, CompareModelSize)\n{\n  auto model = std::make_shared<DoubleIntegratorDynamics>();\n\n  model->GPUSetup();\n\n  long* model_size_GPU_d;\n\n  HANDLE_ERROR(cudaMalloc((void**)&model_size_GPU_d, sizeof(long)));\n\n  CheckModelSize<<<1, 1>>>(model->model_d_, model_size_GPU_d);\n  CudaCheckError();\n\n  long model_size_GPU;\n  HANDLE_ERROR(cudaMemcpy(&model_size_GPU, model_size_GPU_d, sizeof(long), cudaMemcpyDeviceToHost));\n\n  ASSERT_EQ(sizeof(*model), model_size_GPU);\n\n  std::cout << \"Size of the shared pointer to the model:\" << sizeof(model)\n            << std::endl;  // Should be the size of a pointer so 8 bytes for a 64 bit system\n\n  std::cout << \"Size of the model itself: \" << sizeof(*model) << std::endl;  // Should be bigger?\n\n  std::cout << \"Size of the model on the GPU: \" << model_size_GPU << std::endl;\n\n  std::cout << \"Size of the control ranges in the model: \" << sizeof(model->control_rngs_)\n            << std::endl;  // Should be 16 bytes ie. 
4 floats!\n\n  std::cout << \"Size of the parameter structure of the model: \" << sizeof(DoubleIntegratorParams) << std::endl;\n\n  std::cout << \"Size of the device pointer of the model: \" << sizeof(model->model_d_) << std::endl;\n\n  std::cout << \"Size of the stream: \" << sizeof(model->stream_) << std::endl;\n\n  std::cout << \"Size of GPU Memstatus: \" << sizeof(model->GPUMemStatus_) << std::endl;\n}\n\nTEST(Miscellaneous, EigenNormalRandomVector)\n{\n  std::random_device rd;\n  std::mt19937 gen(rd());  // here you could also set a seed\n  std::normal_distribution<float> dis(1, 2);\n\n  // generate a matrix expression\n  Eigen::MatrixXd M = Eigen::MatrixXd::NullaryExpr(100, 100, [&]() { return dis(gen); });\n\n  EXPECT_NEAR(M.mean(), 1, 1e-1);\n\n  EXPECT_NEAR(sqrtf((M.array() * M.array()).mean() - M.mean() * M.mean()), 2, 1e-1);\n}\n\nTEST(Miscellaneous, CreateRandomStateArray)\n{\n  DoubleIntegratorDynamics::state_array X;\n  DoubleIntegratorDynamics::state_array temp = DoubleIntegratorDynamics::state_array::Zero();\n\n  std::random_device rd;\n  std::mt19937 gen(rd());  // here you could also set a seed\n  std::normal_distribution<float> dis(1, 2);\n\n  // generate a matrix expression\n  X = DoubleIntegratorDynamics::state_array::NullaryExpr([&]() { return dis(gen); });\n\n  std::cout << temp + X * 0.01 << std::endl;\n}\n\nTEST(Miscellaneous, Smoothing)\n{\n  Eigen::Matrix<float, 1, 5> filter_coefficients;\n  filter_coefficients << -3, 12, 17, 12, -3;\n  filter_coefficients /= 35.0;\n\n  Eigen::Matrix<float, 14, 3> control_buffer = Eigen::Matrix<float, 14, 3>::Ones();\n\n  std::cout << \"previous control buffer\" << std::endl;\n  std::cout << control_buffer << std::endl;\n\n  Eigen::Matrix<float, 2, 3> control_history;\n  control_history << 1, 2, 3, 4, 5, 6;\n\n  control_buffer.topRows<2>() = control_history;\n\n  Eigen::Matrix<float, 3, 10> nominal_control = 5 * Eigen::Matrix<float, 3, 10>::Ones();\n\n  // Fill the last two timesteps with the end of the 
current nominal control trajectory\n  nominal_control.col(9) << 10, 10, 10;\n  control_buffer.middleRows(2, 10) = nominal_control.transpose();\n\n  control_buffer.row(10 + 2) = nominal_control.transpose().row(10 - 1);\n  control_buffer.row(10 + 3) = nominal_control.transpose().row(10 - 1);\n\n  std::cout << \"current control buffer\" << std::endl;\n  std::cout << control_buffer << std::endl;\n\n  // Apply convolutional filter to each timestep\n  for (int i = 0; i < 10; ++i)\n  {\n    nominal_control.col(i) = (filter_coefficients * control_buffer.middleRows(i, 5)).transpose();\n  }\n\n  std::cout << nominal_control << std::endl;\n}"
  },
  {
    "path": "tests/mppi_core/CCM_tests.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/controllers/R-MPPI/robust_mppi_controller.cuh>\n#include <mppi/core/rmppi_kernel_test.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_circle_cost.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_robust_cost.cuh>\n#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n#include <mppi/feedback_controllers/CCM/ccm.h>\n#include <mppi/utils/test_helper.h>\n#include <cnpy.h>\n\nconst int NUM_TIMESTEPS = 50;\nconst int NUM_ROLLOUTS_CONST = 1024;\n\n// Might be simpler to create a new Controller CLass from RMPPI\ntemplate <class DYN_T = DoubleIntegratorDynamics, class COST_T = DoubleIntegratorCircleCost,\n          int MAX_TIMESTEPS = NUM_TIMESTEPS, int NUM_ROLLOUTS = NUM_ROLLOUTS_CONST, int B_X = 64, int B_Y = 1,\n          int S = 1>\nclass RMPPICCMDoubleIntegratorController\n  : public RobustMPPIController<DYN_T, COST_T, ccm::LinearCCM<DYN_T, MAX_TIMESTEPS>, MAX_TIMESTEPS, NUM_ROLLOUTS, B_X,\n                                B_Y, RobustMPPIParams<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM, MAX_TIMESTEPS>, S>\n{\nprotected:\n  std::mt19937 rng_gen_;\n  std::normal_distribution<float> control_dist_;\n  ccm::LinearCCM<DYN_T, MAX_TIMESTEPS> CCM_feedback_controller_;\n\npublic:\n  typedef RobustMPPIController<DYN_T, COST_T, ccm::LinearCCM<DYN_T, MAX_TIMESTEPS>, MAX_TIMESTEPS, NUM_ROLLOUTS, B_X,\n                               B_Y, RobustMPPIParams<DYN_T::STATE_DIM, DYN_T::CONTROL_DIM, MAX_TIMESTEPS>, S>\n      PARENT_CLASS;\n\n  using control_array = typename PARENT_CLASS::control_array;\n  using control_trajectory = typename PARENT_CLASS::control_trajectory;\n  using state_array = typename PARENT_CLASS::state_array;\n\n  // Constructor... 
Yeah It ain't pretty\n  RMPPICCMDoubleIntegratorController(\n      DYN_T* model, COST_T* cost, ccm::LinearCCM<DYN_T, MAX_TIMESTEPS>* fb_controller, float dt, float lambda,\n      float alpha, float value_function_threshold, const Eigen::Ref<const control_array>& control_std_dev,\n      int num_timesteps = MAX_TIMESTEPS,\n      const Eigen::Ref<const control_trajectory>& init_control_traj = control_trajectory::Zero(),\n      int num_candidate_nominal_states = 9, int optimization_stride = 1, cudaStream_t stream = nullptr)\n    : PARENT_CLASS(model, cost, fb_controller, dt, 1, lambda, alpha, value_function_threshold, control_std_dev,\n                   num_timesteps, init_control_traj, num_candidate_nominal_states, optimization_stride, stream)\n  {\n    control_dist_ = std::normal_distribution<float>(0, 1);\n    CCM_feedback_controller_ = ccm::LinearCCM<DYN_T, MAX_TIMESTEPS>(model);\n  }\n\n  void ptrToVec(const float* input, int num, std::vector<float>& output)\n  {\n    output.assign(input, input + num);\n    if (output.size() != num)\n    {\n      output.assign(num, 0.0);\n      for (int i = 0; i < num; i++)\n      {\n        output[i] = input[i];\n      }\n    }\n  }\n\n  void computeControl(const Eigen::Ref<const state_array>& state, int optimization_stride = 1) override\n  {\n    std::cout << \"Candidate chosen: \" << this->best_index_ << std::endl;\n    // Rewrite computeControl using the CCM Rollout Kernel\n    int c_dim = DYN_T::CONTROL_DIM;\n    int s_dim = DYN_T::STATE_DIM;\n    int single_control_traj_size = this->getNumTimesteps() * c_dim;\n    int multi_control_traj_size = NUM_ROLLOUTS * single_control_traj_size;\n\n    // Handy dandy pointers to nominal data\n    float* trajectory_costs_nominal_d = this->trajectory_costs_d_ + NUM_ROLLOUTS;\n    float* initial_state_nominal_d = this->initial_state_d_ + s_dim;\n    float* control_noise_nominal_d = this->control_noise_d_ + multi_control_traj_size;\n    float* control_nominal_d = this->control_d_ + 
single_control_traj_size;\n    for (int opt_iter = 0; opt_iter < this->getNumIters(); opt_iter++)\n    {\n      // Create noise for trajectories\n      std::vector<float> control_noise_vec(multi_control_traj_size * 2, 0);\n      for (int i = 0; i < multi_control_traj_size; i++)\n      {\n        control_noise_vec[i] = control_dist_(rng_gen_);\n        control_noise_vec[multi_control_traj_size + i] = control_noise_vec[i];\n      }\n      std::vector<float> x_init_act_vec, x_init_nom_vec, u_traj_vec;\n      std::vector<float> control_std_dev_vec;\n      ptrToVec(state.data(), s_dim, x_init_act_vec);\n      ptrToVec(this->nominal_state_.data(), s_dim, x_init_nom_vec);\n      ptrToVec(this->nominal_control_trajectory_.data(), single_control_traj_size, u_traj_vec);\n      ptrToVec(this->getControlStdDev().data(), c_dim, control_std_dev_vec);\n\n      // Launch rollout kernel using CCM\n      // TODO pass in alpha\n      std::array<float, NUM_ROLLOUTS> costs_act_CPU, costs_nom_CPU;\n      launchRMPPIRolloutKernelCCMCPU<DYN_T, COST_T, MAX_TIMESTEPS, NUM_ROLLOUTS>(\n          this->model_, this->cost_, &CCM_feedback_controller_, this->getDt(), this->getNumTimesteps(),\n          optimization_stride, this->getLambda(), this->getAlpha(), this->getValueFunctionThreshold(), x_init_nom_vec,\n          x_init_act_vec, control_std_dev_vec, u_traj_vec, control_noise_vec, costs_act_CPU, costs_nom_CPU);\n\n      for (int i = 0; i < NUM_ROLLOUTS; i++)\n      {\n        this->trajectory_costs_(i) = costs_act_CPU[i];\n        this->trajectory_costs_nominal_(i) = costs_nom_CPU[i];\n      }\n      // Control noise should be modified to contain u + noise\n      this->setBaseline(mppi_common::computeBaselineCost(this->trajectory_costs_.data(), NUM_ROLLOUTS), 0);\n      this->setBaseline(mppi_common::computeBaselineCost(this->trajectory_costs_nominal_.data(), NUM_ROLLOUTS), 1);\n\n      // Copy data over to GPU\n      HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_d_, 
this->trajectory_costs_.data(),\n                                   NUM_ROLLOUTS * sizeof(float), cudaMemcpyHostToDevice, this->stream_));\n\n      HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_nominal_d, this->trajectory_costs_nominal_.data(),\n                                   NUM_ROLLOUTS * sizeof(float), cudaMemcpyHostToDevice, this->stream_));\n\n      HANDLE_ERROR(cudaMemcpyAsync(this->control_noise_d_, control_noise_vec.data(),\n                                   multi_control_traj_size * sizeof(float), cudaMemcpyHostToDevice, this->stream_));\n\n      HANDLE_ERROR(cudaMemcpyAsync(control_noise_nominal_d, control_noise_vec.data() + multi_control_traj_size,\n                                   multi_control_traj_size * sizeof(float), cudaMemcpyHostToDevice, this->stream_));\n\n      // After rollout kernel, control_d_ and nominal_control_d are written to\n      // and not read from so there is nothing to copy to them\n      // HANDLE_ERROR(cudaMemcpyAsync(this->control_d_,\n      //                              this->nominal_control_trajectory_.data(),\n      //                              single_control_traj_size * sizeof(float),\n      //                              cudaMemcpyHostToDevice, this->stream_));\n\n      // // TODO Not done in RMPPI RolloutKernel but I think it should be\n      // HANDLE_ERROR(cudaMemcpyAsync(control_nominal_d,\n      //                              this->nominal_control_trajectory_.data(),\n      //                              single_control_traj_size * sizeof(float),\n      //                              cudaMemcpyHostToDevice, this->stream_));\n\n      HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n      // In this case this->gamma = 1 / lambda\n      mppi_common::launchNormExpKernel(NUM_ROLLOUTS, B_X, this->trajectory_costs_d_, 1.0 / this->getLambda(),\n                                       this->getBaselineCost(0), this->stream_);\n      mppi_common::launchNormExpKernel(NUM_ROLLOUTS, B_X, 
trajectory_costs_nominal_d, 1.0 / this->getLambda(),\n                                       this->getBaselineCost(1), this->stream_);\n\n      HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_.data(), this->trajectory_costs_d_,\n                                   NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n      HANDLE_ERROR(cudaMemcpyAsync(this->trajectory_costs_nominal_.data(), trajectory_costs_nominal_d,\n                                   NUM_ROLLOUTS * sizeof(float), cudaMemcpyDeviceToHost, this->stream_));\n      HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n      this->setNormalizer(mppi_common::computeNormalizer(this->trajectory_costs_.data(), NUM_ROLLOUTS), 0);\n      this->setNormalizer(mppi_common::computeNormalizer(this->trajectory_costs_nominal_.data(), NUM_ROLLOUTS), 1);\n\n      // Compute real free energy\n      mppi_common::computeFreeEnergy(this->free_energy_statistics_.real_sys.freeEnergyMean,\n                                     this->free_energy_statistics_.real_sys.freeEnergyVariance,\n                                     this->free_energy_statistics_.real_sys.freeEnergyModifiedVariance,\n                                     this->trajectory_costs_.data(), NUM_ROLLOUTS, this->getBaselineCost(0),\n                                     this->getLambda());\n\n      // Compute Nominal State free Energy\n      mppi_common::computeFreeEnergy(this->free_energy_statistics_.nominal_sys.freeEnergyMean,\n                                     this->free_energy_statistics_.nominal_sys.freeEnergyVariance,\n                                     this->free_energy_statistics_.nominal_sys.freeEnergyModifiedVariance,\n                                     this->trajectory_costs_nominal_.data(), NUM_ROLLOUTS, this->getBaselineCost(1),\n                                     this->getLambda());\n\n      mppi_common::launchWeightedReductionKernel<DYN_T, NUM_ROLLOUTS, B_X>(\n          this->trajectory_costs_d_, 
this->control_noise_d_, this->control_d_, this->getNormalizerCost(0),\n          this->getNumTimesteps(), this->stream_);\n      mppi_common::launchWeightedReductionKernel<DYN_T, NUM_ROLLOUTS, B_X>(\n          trajectory_costs_nominal_d, control_noise_nominal_d, control_nominal_d, this->getNormalizerCost(1),\n          this->getNumTimesteps(), this->stream_);\n      // Transfer the new control to the host\n      HANDLE_ERROR(cudaMemcpyAsync(this->control_.data(), this->control_d_, sizeof(float) * single_control_traj_size,\n                                   cudaMemcpyDeviceToHost, this->stream_));\n      HANDLE_ERROR(cudaMemcpyAsync(this->nominal_control_trajectory_.data(), control_nominal_d,\n                                   sizeof(float) * single_control_traj_size, cudaMemcpyDeviceToHost, this->stream_));\n      cudaStreamSynchronize(this->stream_);\n    }\n    this->computeStateTrajectoryHelper(this->nominal_state_trajectory_, this->nominal_state_,\n                                       this->nominal_control_trajectory_);\n\n    // Ugly hack for computeDF() method\n    this->propagated_feedback_state_trajectory_.col(0) = state;\n\n    this->free_energy_statistics_.real_sys.normalizerPercent = this->getNormalizerCost(0) / NUM_ROLLOUTS;\n    this->free_energy_statistics_.real_sys.increase =\n        this->getBaselineCost(0) - this->free_energy_statistics_.real_sys.previousBaseline;\n    this->free_energy_statistics_.nominal_sys.normalizerPercent = this->getNormalizerCost(1) / NUM_ROLLOUTS;\n    this->free_energy_statistics_.nominal_sys.increase =\n        this->getBaselineCost(1) - this->free_energy_statistics_.nominal_sys.previousBaseline;\n  }\n\n  // Ugly hack for computeDF() method\n  void setPropogatedFeedbackState(const Eigen::Ref<const state_array>& next_real_state)\n  {\n    this->propagated_feedback_state_trajectory_.col(1) = next_real_state;\n  }\n\n  void computeNominalFeedbackGains(const Eigen::Ref<const state_array>& state) override\n  {\n  }\n\n  
control_array getCCMFeedbackGains(const Eigen::Ref<const state_array>& x_act,\n                                    const Eigen::Ref<const state_array>& x_nom,\n                                    const Eigen::Ref<const control_array>& u_nom)\n  {\n    control_array fb_u = CCM_feedback_controller_.u_feedback(x_act, x_nom, u_nom);\n    std::cout << \"Act: \" << x_act.transpose() << std::endl;\n    std::cout << \"nom: \" << x_nom.transpose() << std::endl;\n    std::cout << \"U: \" << u_nom.transpose() << std::endl;\n    std::cout << \"Feedback: \" << fb_u.transpose() << std::endl;\n    return fb_u;\n  }\n};\n\nbool tubeFailure(float* s)\n{\n  float inner_path_radius2 = 1.675 * 1.675;\n  float outer_path_radius2 = 2.325 * 2.325;\n  float radial_position = s[0] * s[0] + s[1] * s[1];\n  if ((radial_position < inner_path_radius2) || (radial_position > outer_path_radius2))\n  {\n    return true;\n  }\n  else\n  {\n    return false;\n  }\n}\n\nvoid saveTraj(const Eigen::Ref<const Eigen::Matrix<float, DoubleIntegratorDynamics::STATE_DIM, NUM_TIMESTEPS>>& traj,\n              int t, std::vector<float>& vec)\n{\n  for (int i = 0; i < NUM_TIMESTEPS; i++)\n  {\n    for (int j = 0; j < DoubleIntegratorDynamics::STATE_DIM; j++)\n    {\n      vec[t * NUM_TIMESTEPS * DoubleIntegratorDynamics::STATE_DIM + i * DoubleIntegratorDynamics::STATE_DIM + j] =\n          traj(j, i);\n    }\n  }\n}\n\nvoid saveState(const Eigen::Ref<const DoubleIntegratorDynamics::state_array>& state, int t, std::vector<float>& vec)\n{\n  for (int j = 0; j < DoubleIntegratorDynamics::STATE_DIM; j++)\n  {\n    vec[t * DoubleIntegratorDynamics::STATE_DIM + j] = state(j);\n  }\n}\n\nTEST(CCMTest, CCMFeedbackTest)\n{\n  using DYN = DoubleIntegratorDynamics;\n  using COST = DoubleIntegratorCircleCost;\n  DYN model(100);\n  ccm::LinearCCM<DYN> fb_controller(&model);\n  float dt = 0.02;\n  int mission_length = int(10 / dt);\n\n  DYN::state_array x, x_nom, x_dot;\n  x << 4, 0, 0, 1;\n  x_nom << -3, 2, 0, 0;\n  
DYN::control_array current_control;\n\n  float two_percent_settle_time = -1;\n\n  for (int t = 0; t < mission_length; t++)\n  {\n    current_control = fb_controller.u_feedback(x, x_nom, DYN::control_array::Zero());\n    model.computeDynamics(x, current_control, x_dot);\n    model.updateState(x, x_dot, dt);\n\n    DYN::state_array abs_diff = x - x_nom;\n    for (int i = 0; i < DYN::STATE_DIM; i++)\n    {\n      if (x_nom(i) >= 1)\n      {\n        abs_diff(i) /= x_nom(i);\n      }\n    }\n    abs_diff = abs_diff.cwiseAbs();\n    if (abs_diff.block<2, 1>(0, 0).maxCoeff() < 0.02 && two_percent_settle_time < 0)\n    {\n      two_percent_settle_time = t * dt;\n    }\n\n    if (t % 5 == 0)\n    {\n      std::cout << \"State at t = \" << t * dt << \": \" << x.transpose() << std::endl;\n    }\n  }\n  std::cout << \"2% settling time is \" << two_percent_settle_time << \" secs\" << std::endl;\n}\n\nTEST(CCMTest, RMPPIRolloutKernel)\n{\n  // GTEST_SKIP();\n  using DYN = DoubleIntegratorDynamics;\n  using COST = DoubleIntegratorRobustCost;\n  DYN model(100);\n  COST cost;\n  auto params = cost.getParams();\n  params.crash_cost = 100;\n  cost.setParams(params);\n  const int num_timesteps = NUM_TIMESTEPS;\n  const int num_rollouts = NUM_ROLLOUTS_CONST;\n\n  using CONTROLLER = RMPPICCMDoubleIntegratorController<DYN, COST, num_timesteps, num_rollouts>;\n\n  const int state_dim = DYN::STATE_DIM;\n  const int control_dim = DYN::CONTROL_DIM;\n\n  const float dt = 0.02;\n  // int max_iter = 10;\n  float lambda = 4;\n  float alpha = 0;\n  float value_func_threshold = 20;\n\n  const int mission_length = int(0.06 / dt);\n\n  // Create a random number generator\n  // Random number generator for system noise\n  std::mt19937 gen;  // Standard mersenne_twister_engine which will be seeded\n  std::normal_distribution<float> normal_distribution;\n  gen.seed(7);  // Seed the 7, so everyone gets the same noise\n  normal_distribution = std::normal_distribution<float>(0, 1);\n\n  
Eigen::Matrix<float, DYN::STATE_DIM, mission_length> universal_noise =\n      Eigen::Matrix<float, DYN::STATE_DIM, mission_length>::Zero();\n\n  // Create the noise for all systems\n  for (int t = 0; t < mission_length; ++t)\n  {\n    for (int i = 2; i < 4; ++i)\n    {\n      universal_noise(i, t) = normal_distribution(gen);\n    }\n  }\n\n  // std::vector<float> actual_trajectory_save(num_timesteps*mission_length*DYN::STATE_DIM);\n  // std::vector<float> nominal_trajectory_save(num_timesteps*mission_length*DYN::STATE_DIM);\n\n  // Save actual trajectories, nominal_trajectory, free energy\n  std::vector<float> robust_rc_trajectory(DYN::STATE_DIM * mission_length, 0);\n  std::vector<float> robust_rc_nominal_traj(DYN::STATE_DIM * num_timesteps * mission_length, 0);\n  std::vector<float> robust_rc_nominal_free_energy(mission_length, 0);\n  std::vector<float> robust_rc_real_free_energy(mission_length, 0);\n  std::vector<float> robust_rc_nominal_free_energy_bound(mission_length, 0);\n  std::vector<float> robust_rc_real_free_energy_bound(mission_length, 0);\n  std::vector<float> robust_rc_real_free_energy_growth_bound(mission_length, 0);\n  std::vector<float> robust_rc_nominal_free_energy_growth(mission_length, 0);\n  std::vector<float> robust_rc_real_free_energy_growth(mission_length, 0);\n  std::vector<float> robust_rc_nominal_state_used(mission_length, 0);\n\n  //  std::string file_prefix = \"/data/bvlahov3/RMPPI_CCM_control_trajectories_CoRL2020/FreeEnergyRMPPIData/\";\n  std::string file_prefix = \"\";\n\n  CONTROLLER::control_array control_std_dev = CONTROLLER::control_array::Constant(1.0);\n  CONTROLLER::control_trajectory u_traj_eigen = CONTROLLER::control_trajectory::Zero();\n  // Set first control to 1 across entire time\n  u_traj_eigen.row(0) = CONTROLLER::control_trajectory::Constant(1.0).row(0);\n  ccm::LinearCCM<DYN, num_timesteps> fb_controller(&model);\n  CONTROLLER rmppi_controller = CONTROLLER(&model, &cost, &fb_controller, dt, lambda, alpha, 
value_func_threshold,\n                                           control_std_dev, num_timesteps, u_traj_eigen);\n\n  // float x[num_rollouts * state_dim * 2];\n  // float x_dot[num_rollouts * state_dim * 2];\n  // float u[num_rollouts * control_dim * 2];\n  // float du[num_rollouts * control_dim * 2];\n  // float sigma_u[control_dim] = {0.5, 0.05}; // variance to sample noise from\n\n  // COST::control_matrix cost_variance = COST::control_matrix::Identity();\n  // for(int i = 0; i < control_dim; i++) {\n  //   cost_variance(i, i) = sigma_u[i];\n  // }\n  // float fb_u[num_rollouts * control_dim * state_dim];\n\n  DYN::state_array x_init_act, x_dot;\n  x_init_act << 2, 0, 0, 1;\n  DYN::state_array x_init_nom;\n  x_init_nom << 0, 0, 0.1, 0;\n\n  // rmppi_controller.computeControl(x_init_act);\n  DYN::state_array x = x_init_act;\n  std::string act_traj_file_name;\n  std::string nom_traj_file_name;\n  std::string nom_free_energy_name;\n  std::string act_free_energy_name;\n  std::string nom_state_used_name;\n  std::string act_free_energy_bound_name;\n  std::string nom_free_energy_bound_name;\n  std::string act_free_energy_growth_bound_name;\n  std::string act_free_energy_growth_name;\n  std::string nom_free_energy_growth_name;\n  for (int t = 0; t < mission_length; t++)\n  {\n    act_traj_file_name = file_prefix + \"robust_large_actual_traj_CCM_t_\" + std::to_string(t) + \".npy\";\n    nom_traj_file_name = file_prefix + \"robust_large_nominal_traj_CCM_t_\" + std::to_string(t) + \".npy\";\n    nom_free_energy_name = file_prefix + \"robust_large_nominal_free_energy_CCM_t_\" + std::to_string(t) + \".npy\";\n    act_free_energy_name = file_prefix + \"robust_large_actual_free_energy_CCM_t_\" + std::to_string(t) + \".npy\";\n    nom_state_used_name = file_prefix + \"robust_large_nominal_state_used_CCM_t_\" + std::to_string(t) + \".npy\";\n\n    act_free_energy_bound_name =\n        file_prefix + \"robust_large_actual_free_energy_bound_CCM_t_\" + std::to_string(t) + 
\".npy\";\n    nom_free_energy_bound_name =\n        file_prefix + \"robust_large_nominal_free_energy_bound_CCM_t_\" + std::to_string(t) + \".npy\";\n    act_free_energy_growth_bound_name =\n        file_prefix + \"robust_large_actual_free_energy_growth_bound_CCM_t_\" + std::to_string(t) + \".npy\";\n    act_free_energy_growth_name =\n        file_prefix + \"robust_large_actual_free_energy_growth_CCM_t_\" + std::to_string(t) + \".npy\";\n    nom_free_energy_growth_name =\n        file_prefix + \"robust_large_nominal_free_energy_growth_CCM_t_\" + std::to_string(t) + \".npy\";\n    // if (cost.computeStateCost(x) > 1000) {\n    //   std::cout << \"State Cost is \" << cost.computeStateCost(x) << std::endl;\n    //   std::cout << \"State was \" << x.transpose() << std::endl;\n    //   FAIL();\n    // }\n\n    if (tubeFailure(x.data()))\n    {\n      // cnpy::npy_save(act_traj_file_name, actual_trajectory_save.data(),\n      //                {mission_length, num_timesteps, DYN::STATE_DIM},\"w\");\n      // cnpy::npy_save(nom_traj_file_name, nominal_trajectory_save.data(),\n      //                {mission_length, num_timesteps, DYN::STATE_DIM},\"w\");\n      // cnpy::npy_save(act_traj_file_name, actual_trajectory_save.data(),\n      //                {mission_length, num_timesteps, DYN::STATE_DIM},\"w\");\n      // cnpy::npy_save(nom_traj_file_name, nominal_trajectory_save.data(),\n      //                 {mission_length, num_timesteps, DYN::STATE_DIM},\"w\");\n\n      cnpy::npy_save(act_traj_file_name, robust_rc_trajectory.data(),\n                     { mission_length, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n      cnpy::npy_save(nom_traj_file_name, robust_rc_nominal_traj.data(),\n                     { mission_length, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n      cnpy::npy_save(nom_free_energy_name, robust_rc_nominal_free_energy.data(), { mission_length }, \"w\");\n      cnpy::npy_save(act_free_energy_name, 
robust_rc_real_free_energy.data(), { mission_length }, \"w\");\n      cnpy::npy_save(nom_state_used_name, robust_rc_nominal_state_used.data(), { mission_length }, \"w\");\n      cnpy::npy_save(act_free_energy_bound_name, robust_rc_real_free_energy_bound.data(), { mission_length }, \"w\");\n      cnpy::npy_save(nom_free_energy_bound_name, robust_rc_nominal_free_energy_bound.data(), { mission_length }, \"w\");\n      cnpy::npy_save(act_free_energy_growth_bound_name, robust_rc_real_free_energy_growth_bound.data(),\n                     { mission_length }, \"w\");\n      cnpy::npy_save(act_free_energy_growth_name, robust_rc_real_free_energy_growth.data(), { mission_length }, \"w\");\n      cnpy::npy_save(nom_free_energy_growth_name, robust_rc_nominal_free_energy_growth.data(), { mission_length }, \"w\");\n      printf(\"Current Time: %f    \", t * dt);\n      model.printState(x.data());\n      std::cout << \"\\tCandidate Free Energies: \" << rmppi_controller.getCandidateFreeEnergy().transpose() << std::endl;\n      std::cout << \"Tube failure!!\" << std::endl;\n      FAIL() << \"Visualize the trajectories by running scripts/double_integrator/plot_DI_test_trajectories; \"\n                \"the argument to this python file is the build directory of MPPI-Generic\";\n    }\n\n    if (t % 2 == 0)\n    {\n      printf(\"Current Time: %5.2f    \", t * dt);\n      model.printState(x.data());\n      std::cout << \"\\tCandidate Free Energies: \" << rmppi_controller.getCandidateFreeEnergy().transpose() << std::endl;\n    }\n    rmppi_controller.updateImportanceSamplingControl(x, 1);\n    rmppi_controller.computeControl(x);\n\n    auto nominal_trajectory = rmppi_controller.getTargetStateSeq();\n    auto fe_stat = rmppi_controller.getFreeEnergyStatistics();\n\n    // for (int i = 0; i < num_timesteps; i++) {\n    //   for (int j = 0; j < DYN::STATE_DIM; j++) {\n    //     actual_trajectory_save[t * num_timesteps * DYN::STATE_DIM +\n    //                            
i*DYN::STATE_DIM + j] = x(j);\n    //     nominal_trajectory_save[t * num_timesteps * DYN::STATE_DIM +\n    //                             i*DYN::STATE_DIM + j] = nominal_trajectory(j, i);\n    //   }\n    // }\n\n    // Save everything\n    saveState(x, t, robust_rc_trajectory);\n    saveTraj(nominal_trajectory, t, robust_rc_nominal_traj);\n    robust_rc_nominal_free_energy[t] = fe_stat.nominal_sys.freeEnergyMean;\n    robust_rc_real_free_energy[t] = fe_stat.real_sys.freeEnergyMean;\n    robust_rc_nominal_free_energy_bound[t] = value_func_threshold + 2 * fe_stat.nominal_sys.freeEnergyModifiedVariance;\n    robust_rc_real_free_energy_bound[t] = fe_stat.nominal_sys.freeEnergyMean +\n                                          cost.getLipshitzConstantCost() * 1 * (x - nominal_trajectory.col(0)).norm();\n\n    DYN::state_array x_nom = rmppi_controller.getTargetStateSeq().col(0);\n    DYN::control_array current_control = rmppi_controller.getControlSeq().col(0);\n\n    current_control += rmppi_controller.getCCMFeedbackGains(x, x_nom, current_control);\n    model.computeDynamics(x, current_control, x_dot);\n    model.updateState(x, x_dot, dt);\n    rmppi_controller.setPropogatedFeedbackState(x);\n\n    if (x.hasNaN())\n    {\n      std::cout << \"NANANANANA\\n\\n\\n\\nNANANANANAN\" << std::endl;\n    }\n\n    robust_rc_real_free_energy_growth_bound[t] = (value_func_threshold - fe_stat.nominal_sys.freeEnergyMean) +\n                                                 cost.getLipshitzConstantCost() * 1 * rmppi_controller.computeDF() +\n                                                 2 * fe_stat.nominal_sys.freeEnergyModifiedVariance;\n    robust_rc_nominal_free_energy_growth[t] = fe_stat.nominal_sys.increase;\n    robust_rc_real_free_energy_growth[t] = fe_stat.real_sys.increase;\n    robust_rc_nominal_state_used[t] = fe_stat.nominal_state_used;\n\n    x += universal_noise.col(t) * sqrt(model.getParams().system_noise) * dt;\n    if (x.hasNaN())\n    {\n      std::cout << 
\"NOISEYNOISE\\n\\n\\n\\nNANANANANAN\" << std::endl;\n    }\n    rmppi_controller.slideControlSequence(1);\n  }\n  // act_traj_file_name = file_prefix + \"robust_large_actual_CCM_t_\" +\n  //                   std::to_string(mission_length - 1) + \".npy\";\n  // nom_traj_file_name = file_prefix + \"robust_large_nominal_CCM_t_\" +\n  //                 std::to_string(mission_length - 1) + \".npy\";\n  // cnpy::npy_save(act_traj_file_name, actual_trajectory_save.data(),\n  //                    {mission_length, num_timesteps, DYN::STATE_DIM},\"w\");\n  // cnpy::npy_save(nom_traj_file_name, nominal_trajectory_save.data(),\n  //                 {mission_length, num_timesteps, DYN::STATE_DIM},\"w\");\n\n  cnpy::npy_save(act_traj_file_name, robust_rc_trajectory.data(),\n                 { mission_length, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(nom_traj_file_name, robust_rc_nominal_traj.data(),\n                 { mission_length, num_timesteps, DoubleIntegratorDynamics::STATE_DIM }, \"w\");\n  cnpy::npy_save(nom_free_energy_name, robust_rc_nominal_free_energy.data(), { mission_length }, \"w\");\n  cnpy::npy_save(act_free_energy_name, robust_rc_real_free_energy.data(), { mission_length }, \"w\");\n  cnpy::npy_save(nom_state_used_name, robust_rc_nominal_state_used.data(), { mission_length }, \"w\");\n  cnpy::npy_save(act_free_energy_bound_name, robust_rc_real_free_energy_bound.data(), { mission_length }, \"w\");\n  cnpy::npy_save(nom_free_energy_bound_name, robust_rc_nominal_free_energy_bound.data(), { mission_length }, \"w\");\n  cnpy::npy_save(act_free_energy_growth_bound_name, robust_rc_real_free_energy_growth_bound.data(), { mission_length },\n                 \"w\");\n  cnpy::npy_save(act_free_energy_growth_name, robust_rc_real_free_energy_growth.data(), { mission_length }, \"w\");\n  cnpy::npy_save(nom_free_energy_growth_name, robust_rc_nominal_free_energy_growth.data(), { mission_length }, \"w\");\n}\n"
  },
  {
    "path": "tests/mppi_core/CMakeLists.txt",
    "content": "set(GTEST_LIBRARIES gtest gmock gtest_main)\nfile(GLOB TARGET_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cu)\n\nlist(REMOVE_ITEM TARGET_SRCS \"CCM_tests.cu\")\n\nforeach(T_FILE IN LISTS TARGET_SRCS)\n  get_filename_component(T_NAME ${T_FILE} NAME_WE)\n  add_executable(${T_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp ${T_FILE})\n  target_link_libraries(${T_NAME}\n          ${GTEST_LIBRARIES}\n          ${MPPI_HEADER_LIBRARY_NAME})\n  gtest_discover_tests(${T_NAME})\n  set_target_properties(${T_NAME} PROPERTIES FOLDER test)\nendforeach()\n"
  },
  {
    "path": "tests/mppi_core/base_plant_tester.cu",
    "content": "//\n// Created by jgibson37 on 2/24/20.\n//\n\n#include <gmock/gmock.h>\n#include <gtest/gtest.h>\n#include <mppi/core/base_plant.hpp>\n#include <mppi/instantiations/cartpole_mppi/cartpole_mppi.cuh>\n#include <mppi/utils/test_helper.h>\n#include <mppi_test/mock_classes/mock_controller.h>\n\n#include <algorithm>\n#include <numeric>\n#include <random>\n\nconst float DT = 0.05;\n\ntemplate <class CONTROLLER_T>\nclass TestPlant : public BasePlant<CONTROLLER_T>\n{\npublic:\n  double time_ = 0.0;\n\n  int pubControlCalled = 0;\n  int pubNominalStateCalled = 0;\n\n  using c_array = typename CONTROLLER_T::control_array;\n  using c_traj = typename CONTROLLER_T::control_trajectory;\n\n  using s_array = typename CONTROLLER_T::state_array;\n  using s_traj = typename CONTROLLER_T::state_trajectory;\n\n  using DYN_T = typename CONTROLLER_T::TEMPLATED_DYNAMICS;\n  using DYN_PARAMS_T = typename DYN_T::DYN_PARAMS_T;\n  using COST_T = typename CONTROLLER_T::TEMPLATED_COSTS;\n  using COST_PARAMS_T = typename COST_T::COST_PARAMS_T;\n  double timestamp_;\n  double loop_speed_;\n\n  TestPlant(std::shared_ptr<MockController> controller, int hz = 20, int opt_stride = 1)\n    : BasePlant<CONTROLLER_T>(controller, hz, opt_stride)\n  {\n  }\n\n  void pubControl(const c_array& u) override\n  {\n    pubControlCalled++;\n  }\n\n  void pubNominalState(const s_array& s) override\n  {\n    pubNominalStateCalled++;\n  }\n\n  void pubFreeEnergyStatistics(MPPIFreeEnergyStatistics& fe_stats) override\n  {\n  }\n\n  void incrementTime()\n  {\n    time_ += DT;\n  }\n\n  void incrementTime(double dt)\n  {\n    time_ += dt;\n  }\n\n  int checkStatus() override\n  {\n    return 1;\n  }\n\n  double getCurrentTime()\n  {\n    return time_ + 0.3421;\n  }\n\n  double getPoseTime()\n  {\n    return time_;\n  }\n\n  // accessors for protected members\n  int getNumIter()\n  {\n    return this->num_iter_;\n  }\n  double getLastUsedPoseUpdateTime()\n  {\n    return 
this->last_used_state_update_time_;\n  }\n  int getStatus()\n  {\n    return this->status_;\n  }\n  bool getDebugMode()\n  {\n    return this->debug_mode_;\n  }\n  double getOptimizationDuration()\n  {\n    return this->optimization_duration_;\n  }\n  double getOptimizationAvg()\n  {\n    return this->avg_optimize_time_ms_;\n  }\n  double getOptimizeLoopDuration()\n  {\n    return this->optimize_loop_duration_;\n  }\n  double getLoopAvg()\n  {\n    return this->avg_loop_time_ms_;\n  }\n  double getFeedbackDuration()\n  {\n    return this->feedback_duration_;\n  }\n  double getFeedbackAvg()\n  {\n    return this->avg_feedback_time_ms_;\n  }\n  void setLastTime(double time)\n  {\n    time_ = time;\n  }\n  void setLastUsedTime(double time)\n  {\n    this->last_used_state_update_time_ = time;\n  }\n  double getSleepTimeAvg()\n  {\n    return this->avg_sleep_time_ms_;\n  }\n};\n\ntypedef TestPlant<MockController> MockTestPlant;\n\nclass BasePlantTest : public ::testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    EXPECT_CALL(mockCost, getParams()).Times(1);\n    EXPECT_CALL(mockDynamics, getParams()).Times(1);\n\n    EXPECT_CALL(mockDynamics, freeCudaMem()).Times(1);\n    EXPECT_CALL(mockCost, freeCudaMem()).Times(1);\n    EXPECT_CALL(mockSampler, freeCudaMem()).Times(1);\n    EXPECT_CALL(mockFeedback, freeCudaMem()).Times(1);\n\n    mockController = std::make_shared<MockController>();\n    mockController->setDt(DT);\n    EXPECT_CALL(*mockController, getDt()).WillRepeatedly(testing::Return(DT));\n    mockController->cost_ = &mockCost;\n    mockController->model_ = &mockDynamics;\n    mockController->sampler_ = &mockSampler;\n    mockController->fb_controller_ = &mockFeedback;\n\n    plant = std::make_shared<MockTestPlant>(mockController);\n  }\n\n  void TearDown() override\n  {\n  }\n  MockDynamics mockDynamics;\n  MockCost mockCost;\n  MockFeedback mockFeedback;\n  MockSamplingDistribution mockSampler;\n  std::shared_ptr<MockController> mockController;\n  
std::shared_ptr<MockTestPlant> plant;\n\n  const float SMALL_TIME_MS = 8;\n};\n\nTEST_F(BasePlantTest, Constructor)\n{\n  EXPECT_EQ(plant->controller_, mockController);\n  EXPECT_EQ(plant->getHz(), 20);\n  EXPECT_EQ(plant->getTargetOptimizationStride(), 1);\n  EXPECT_EQ(plant->getNumIter(), 0);\n  EXPECT_EQ(plant->getLastUsedPoseUpdateTime(), 0);\n  EXPECT_EQ(plant->getStatus(), 1);\n  EXPECT_EQ(mockController->getFeedbackEnabled(), false);\n  EXPECT_EQ(plant->hasNewCostParams(), false);\n  EXPECT_EQ(plant->hasNewDynamicsParams(), false);\n}\n\nTEST_F(BasePlantTest, getAndSetState)\n{\n  // check initial state is zerod\n\n  MockController::state_array state = plant->getState();\n  for (int i = 0; i < 1; i++)\n  {\n    EXPECT_EQ(state(i), 0.0);\n  }\n\n  MockController::state_array new_state;\n  for (int i = 0; i < 1; i++)\n  {\n    new_state(i) = i;\n  }\n  plant->setState(new_state);\n  state = plant->getState();\n  for (int i = 0; i < 1; i++)\n  {\n    EXPECT_EQ(state(i), i);\n  }\n}\n\nTEST_F(BasePlantTest, getSetOptimizationStride)\n{\n  int optimization_stride = plant->getTargetOptimizationStride();\n\n  EXPECT_EQ(optimization_stride, 1);\n\n  plant->setTargetOptimizationStride(5);\n  optimization_stride = plant->getTargetOptimizationStride();\n\n  EXPECT_EQ(optimization_stride, 5);\n}\n\nTEST_F(BasePlantTest, getSetDynamicsParams)\n{\n  EXPECT_EQ(plant->hasNewDynamicsParams(), false);\n  MockTestPlant::DYN_PARAMS_T params;\n\n  params.test = 3;\n\n  plant->setDynamicsParams(params);\n  EXPECT_EQ(plant->hasNewDynamicsParams(), true);\n\n  MockTestPlant::DYN_PARAMS_T new_params = plant->getNewDynamicsParams();\n  EXPECT_EQ(plant->hasNewDynamicsParams(), false);\n  EXPECT_EQ(new_params.test, params.test);\n}\n\nTEST_F(BasePlantTest, getSetCostParams)\n{\n  EXPECT_EQ(plant->hasNewCostParams(), false);\n\n  MockTestPlant::COST_PARAMS_T params;\n  params.test = 100;\n\n  plant->setCostParams(params);\n  EXPECT_EQ(plant->hasNewCostParams(), true);\n\n  auto 
new_params = plant->getNewCostParams();\n  EXPECT_EQ(plant->hasNewCostParams(), false);\n  EXPECT_EQ(params.test, new_params.test);\n}\n\nTEST_F(BasePlantTest, updateParametersAllFalse)\n{\n  EXPECT_CALL(mockCost, setParams(testing::_)).Times(0);\n  EXPECT_CALL(mockDynamics, setParams(testing::_)).Times(0);\n\n  plant->updateParameters();\n}\n\nTEST_F(BasePlantTest, updateParametersAllTrue)\n{\n  EXPECT_CALL(mockCost, setParams(testing::_)).Times(1);\n  EXPECT_CALL(mockDynamics, setParams(testing::_)).Times(1);\n\n  plant->setDebugMode(true);\n  plant->setDynamicsParams(MockDynamics::DYN_PARAMS_T());\n  plant->setCostParams(MockCost::COST_PARAMS_T());\n\n  plant->updateParameters();\n}\n\nTEST_F(BasePlantTest, updateStateOutsideTimeTest)\n{\n  plant->setLastTime(0);\n\n  EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_)).Times(0);\n\n  MockController::state_array state = MockController::state_array::Zero();\n  plant->updateState(state, mockController->getDt() * mockController->getNumTimesteps() + 0.01);\n  EXPECT_EQ(plant->getState(), state);\n\n  plant->setLastTime(100);\n  plant->updateState(state, 99.99);\n  EXPECT_EQ(plant->getState(), state);\n  EXPECT_EQ(plant->pubControlCalled, 0);\n  EXPECT_EQ(plant->pubNominalStateCalled, 0);\n}\n\nTEST_F(BasePlantTest, updateStateTest)\n{\n  plant->setLastTime(0);\n\n  MockController::state_array state = MockController::state_array::Zero();\n  // EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_,\n  // testing::_)).Times(0);\n  plant->updateState(state, 12);\n  EXPECT_EQ(plant->pubControlCalled, 0);\n  EXPECT_EQ(plant->pubNominalStateCalled, 0);\n\n  // Calling updateState() should not pub controls when none have been calculated as of yet\n  EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_)).Times(0);\n  plant->setLastUsedTime(11);\n  plant->updateState(state, 12);\n 
 EXPECT_EQ(plant->getState(), state);\n  EXPECT_EQ(plant->pubControlCalled, 0);\n  EXPECT_EQ(plant->pubNominalStateCalled, 0);\n\n  // TODO in debug should pub nominal state\n}\n\nTEST_F(BasePlantTest, pubControlOnlyAfterControlAreCalculatedTest)\n{\n  ::testing::Sequence s1;\n  // Step 1: calling updateState() before controls are calculated should not call controller->getCurrentControl()\n  MockController::state_array state = MockController::state_array::Zero();\n  EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_))\n      .Times(0)\n      .InSequence(s1);\n  double curr_time = 0.0;\n  plant->updateState(state, curr_time);\n  EXPECT_EQ(plant->pubControlCalled, 0);\n  EXPECT_EQ(plant->pubNominalStateCalled, 0);\n  // ::testing::Mock::VerifyAndClearExpectations(mockController.get());\n\n  // Step 2: run control iteration inside plant\n  // Create valid outputs from gmock methods to prevent nan detection from triggering\n  MockController::control_trajectory valid_control_seq =\n      MockController::control_trajectory::Zero(MockDynamics::CONTROL_DIM, NUM_TIMESTEPS);\n  MockController::state_trajectory valid_state_seq =\n      MockController::state_trajectory::Zero(MockDynamics::STATE_DIM, NUM_TIMESTEPS);\n  EXPECT_CALL(*mockController, computeControl(testing::_, testing::_)).Times(1).InSequence(s1);\n  EXPECT_CALL(*mockController, getControlSeq()).Times(1).WillRepeatedly(testing::Return(valid_control_seq));\n  EXPECT_CALL(*mockController, getTargetStateSeq()).Times(1).WillRepeatedly(testing::Return(valid_state_seq));\n  // EXPECT_CALL(*mockController, getTargetOutputSeq()).Times(1);\n  std::atomic<bool> is_alive(true);\n  plant->runControlIteration(&is_alive);\n\n  // Step 3: calling updateState() now should use controller->getCurrentControl()\n  EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_))\n      .Times(1)\n      .InSequence(s1);\n  curr_time++;\n  
plant->updateState(state, curr_time);\n  EXPECT_EQ(plant->pubControlCalled, 1);\n  EXPECT_EQ(plant->pubNominalStateCalled, 0);\n}\n\nTEST_F(BasePlantTest, EnsureReceivingStateCompletesRunControlIterationTest)\n{\n  std::atomic<bool> is_alive(true);\n  // Create valid outputs from gmock methods to prevent nan detection from triggering\n  MockController::control_trajectory valid_control_seq =\n      MockController::control_trajectory::Zero(MockDynamics::CONTROL_DIM, NUM_TIMESTEPS);\n  MockController::state_trajectory valid_state_seq =\n      MockController::state_trajectory::Zero(MockDynamics::STATE_DIM, NUM_TIMESTEPS);\n  EXPECT_CALL(*mockController, computeControl(testing::_, testing::_)).Times(1);\n  EXPECT_CALL(*mockController, getControlSeq()).Times(1).WillRepeatedly(testing::Return(valid_control_seq));\n  EXPECT_CALL(*mockController, getTargetStateSeq()).Times(1).WillRepeatedly(testing::Return(valid_state_seq));\n  // EXPECT_CALL(*mockController, getTargetOutputSeq()).Times(1);\n  std::thread new_thread(&MockTestPlant::runControlIteration, plant.get(), &is_alive);\n  // Wait some period of time and then call updateState()\n  std::cout << \"Wait for new state\" << std::endl;\n  std::this_thread::sleep_for(std::chrono::milliseconds(300));\n  EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_)).Times(0);\n  MockController::state_array state = MockController::state_array::Zero();\n  double curr_time = 0.0;\n  plant->updateState(state, curr_time);\n  std::cout << \"State sent\" << std::endl;\n  std::this_thread::sleep_for(std::chrono::milliseconds(100));\n  // is_alive.store(false);\n  new_thread.join();\n}\n\nTEST_F(BasePlantTest, runControlIterationStoppedTest)\n{\n  EXPECT_CALL(*mockController, slideControlSequence(testing::_)).Times(0);\n  EXPECT_CALL(*mockController, computeControl(testing::_, testing::_)).Times(0);\n\n  std::atomic<bool> is_alive(false);\n  plant->runControlIteration(&is_alive);\n}\n\n// 
TODO speed up to make tests run faster\nTEST_F(BasePlantTest, runControlIterationDebugFalseNoFeedbackTest)\n{\n  double init_time = 100;\n  plant->setLastTime(init_time);\n  MockController::state_array s = MockController::state_array::Zero();\n  for (int i = 0; i < 2; i++)\n  {\n    double wait_ms = 50 * i;\n\n    auto wait_function = [wait_ms](const Eigen::Ref<const MockController::state_array>& state,\n                                   int optimization_stride = 0) { usleep(wait_ms * 1e3); };\n\n    int expect_opt_stride = i > 0 ? 1 : 0;\n\n    MockDynamics::control_array control = MockDynamics::control_array::Zero();\n    if (i > 0)\n    {\n      EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_))\n          .Times(1)\n          .WillRepeatedly(testing::Return(control));  // called by update state should not be called since time since\n                                                      // last\n      // opt should be zero on first iteration, should be called on second\n    }\n    else\n    {\n      EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_))\n          .Times(0);\n    }\n\n    EXPECT_CALL(*mockController, slideControlSequence(expect_opt_stride)).Times(i > 0 ? 
1 : 0);\n    EXPECT_CALL(*mockController, computeControl(testing::_, testing::_))\n        .Times(1)\n        .WillRepeatedly(testing::Invoke(wait_function));\n    MockController::control_trajectory control_seq = MockController::control_trajectory::Zero();\n    EXPECT_CALL(*mockController, getControlSeq()).Times(1).WillRepeatedly(testing::Return(control_seq));\n    MockController::state_trajectory state_seq = MockController::state_trajectory::Zero();\n    EXPECT_CALL(*mockController, getTargetStateSeq()).Times(1).WillRepeatedly(testing::Return(state_seq));\n\n    EXPECT_CALL(*mockController, computeFeedback(testing::_)).Times(0);\n    EXPECT_CALL(*mockController, getFeedbackControl(testing::_, testing::_, testing::_)).Times(0);\n    EXPECT_CALL(*mockController, computeFeedbackPropagatedStateSeq()).Times(1);\n    EXPECT_CALL(*mockController, calculateSampledStateTrajectories()).Times(0);\n\n    EXPECT_EQ(plant->getDebugMode(), false);\n\n    std::atomic<bool> is_alive(true);\n    plant->updateState(s, init_time + i * DT);\n    plant->runControlIteration(&is_alive);\n    plant->incrementTime();\n\n    EXPECT_EQ(plant->checkStatus(), 1);\n    EXPECT_EQ(plant->getStateTraj(), state_seq);\n    EXPECT_EQ(plant->getControlTraj(), control_seq);\n    MockController::TEMPLATED_FEEDBACK_STATE feedback = plant->getFeedbackState();\n    MockController::state_array state = MockController::state_array::Ones();\n    for (int j = 0; j < 100; j++)\n    {\n      // TODO check that feedback is correct\n      // auto result = feedback[i] * state;\n      // float sum = feedback[i] * state;\n      // EXPECT_FLOAT_EQ(result(0), 0);\n    }\n\n    // check last pose update\n    EXPECT_FLOAT_EQ(plant->getLastUsedPoseUpdateTime(), DT * i + init_time);\n    EXPECT_EQ(plant->getNumIter(), i + 1);\n    EXPECT_EQ(plant->getLastOptimizationStride(), expect_opt_stride);\n\n    EXPECT_THAT(plant->getOptimizationDuration(),\n                testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + 
SMALL_TIME_MS)));\n    EXPECT_LT(plant->getOptimizationAvg(), wait_ms + SMALL_TIME_MS);\n    EXPECT_THAT(plant->getOptimizeLoopDuration(),\n                testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n    EXPECT_LT(plant->getLoopAvg(), wait_ms + SMALL_TIME_MS);\n    EXPECT_LE(plant->getFeedbackDuration(), SMALL_TIME_MS);\n    EXPECT_LE(plant->getFeedbackAvg(), SMALL_TIME_MS);\n  }\n}\n\nTEST_F(BasePlantTest, runControlIterationDebugFalseFeedbackTest)\n{\n  EXPECT_CALL(mockFeedback, initTrackingController()).Times(1);\n  mockController->initFeedback();\n\n  double init_time = 51789;\n  plant->setLastTime(init_time);\n\n  MockController::state_array s = MockController::state_array::Zero();\n  for (int i = 0; i < 10; i++)\n  {\n    double wait_ms = 50 * i;\n\n    auto wait_function = [wait_ms](const Eigen::Ref<const MockController::state_array>& state,\n                                   int optimization_stride = 0) { usleep(wait_ms * 1e3); };\n\n    int expect_opt_stride = i > 0 ? 1 : 0;\n\n    MockDynamics::control_array control = MockDynamics::control_array::Zero();\n    if (i > 0)\n    {\n      EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_))\n          .Times(1)\n          .WillRepeatedly(testing::Return(control));  // called by update state should not be called since time since\n                                                      // last\n      // opt should be zero on first iteration, should be called on rest\n    }\n    else\n    {\n      EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_))\n          .Times(0);\n    }\n\n    EXPECT_CALL(*mockController, slideControlSequence(expect_opt_stride)).Times(i > 0 ? 
1 : 0);\n    EXPECT_CALL(*mockController, computeControl(testing::_, testing::_))\n        .Times(1)\n        .WillRepeatedly(testing::Invoke(wait_function));\n    MockController::control_trajectory control_seq = MockController::control_trajectory::Zero();\n    EXPECT_CALL(*mockController, getControlSeq()).Times(1).WillRepeatedly(testing::Return(control_seq));\n    MockController::state_trajectory state_seq = MockController::state_trajectory::Zero();\n    EXPECT_CALL(*mockController, getTargetStateSeq()).Times(1).WillRepeatedly(testing::Return(state_seq));\n\n    EXPECT_CALL(*mockController, computeFeedback(testing::_)).Times(1).WillRepeatedly(testing::Invoke(wait_function));\n    MockController::TEMPLATED_FEEDBACK_STATE feedback;\n    EXPECT_CALL(*mockController, getFeedbackState()).Times(1).WillRepeatedly(testing::Return(feedback));\n    EXPECT_CALL(*mockController, computeFeedbackPropagatedStateSeq()).Times(1);\n    EXPECT_CALL(*mockController, calculateSampledStateTrajectories()).Times(0);\n\n    EXPECT_EQ(plant->getDebugMode(), false);\n\n    std::atomic<bool> is_alive(true);\n    plant->updateState(s, init_time + i * DT);\n    plant->runControlIteration(&is_alive);\n    plant->incrementTime();\n\n    EXPECT_EQ(plant->checkStatus(), 1);\n    EXPECT_EQ(plant->getStateTraj(), state_seq);\n    EXPECT_EQ(plant->getControlTraj(), control_seq);\n    // EXPECT_EQ(plant->getFeedbackState(), feedback);\n\n    // check last pose update\n    EXPECT_FLOAT_EQ(plant->getLastUsedPoseUpdateTime(), DT * i + init_time);\n    EXPECT_EQ(plant->getNumIter(), i + 1);\n    EXPECT_EQ(plant->getLastOptimizationStride(), expect_opt_stride);\n\n    EXPECT_THAT(plant->getOptimizationDuration(),\n                testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n    EXPECT_LT(plant->getOptimizationAvg(), wait_ms + SMALL_TIME_MS);\n    EXPECT_THAT(plant->getFeedbackDuration(),\n                testing::AllOf(testing::Ge(wait_ms), testing::Le((wait_ms + 
SMALL_TIME_MS) * 2)));\n    // TODO should be range as well\n    EXPECT_LT(plant->getFeedbackAvg(), wait_ms + SMALL_TIME_MS);\n    EXPECT_THAT(plant->getOptimizeLoopDuration(),\n                testing::AllOf(testing::Ge(wait_ms * 2), testing::Le((wait_ms + SMALL_TIME_MS) * 2)));\n    EXPECT_LT(plant->getLoopAvg(), (wait_ms + SMALL_TIME_MS) * 2);\n  }\n}\n\nTEST_F(BasePlantTest, runControlIterationDebugFalseFeedbackAvgTest)\n{\n  EXPECT_CALL(mockFeedback, initTrackingController()).Times(1);\n  mockController->initFeedback();\n\n  double init_time = 51531;\n  plant->setLastTime(init_time);\n  MockController::state_array s = MockController::state_array::Zero();\n\n  for (int i = 0; i < 10; i++)\n  {\n    double wait_ms = 50;\n\n    auto wait_function = [wait_ms](const Eigen::Ref<const MockController::state_array>& state,\n                                   int optimization_stride = 0) { usleep(wait_ms * 1e3); };\n\n    int expect_opt_stride = i > 0 ? 1 : 0;\n\n    MockDynamics::control_array control = MockDynamics::control_array::Zero();\n    if (i > 0)\n    {\n      EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_))\n          .Times(1)\n          .WillRepeatedly(testing::Return(control));  // called by update state should not be called since time since\n                                                      // last\n      // opt should be zero on first iteration, should be called on rest\n    }\n    else\n    {\n      EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_))\n          .Times(0);\n    }\n\n    EXPECT_CALL(*mockController, slideControlSequence(expect_opt_stride)).Times(i > 0 ? 
1 : 0);\n    EXPECT_CALL(*mockController, computeControl(testing::_, testing::_))\n        .Times(1)\n        .WillRepeatedly(testing::Invoke(wait_function));\n    MockController::control_trajectory control_seq = MockController::control_trajectory::Zero();\n    EXPECT_CALL(*mockController, getControlSeq()).Times(1).WillRepeatedly(testing::Return(control_seq));\n    MockController::state_trajectory state_seq = MockController::state_trajectory::Zero();\n    EXPECT_CALL(*mockController, getTargetStateSeq()).Times(1).WillRepeatedly(testing::Return(state_seq));\n\n    EXPECT_CALL(*mockController, computeFeedback(testing::_)).Times(1).WillRepeatedly(testing::Invoke(wait_function));\n    MockController::TEMPLATED_FEEDBACK_STATE feedback;\n    EXPECT_CALL(*mockController, getFeedbackState()).Times(1).WillRepeatedly(testing::Return(feedback));\n    EXPECT_CALL(*mockController, computeFeedbackPropagatedStateSeq()).Times(1);\n    EXPECT_CALL(*mockController, calculateSampledStateTrajectories()).Times(0);\n\n    EXPECT_EQ(plant->getDebugMode(), false);\n\n    std::atomic<bool> is_alive(true);\n    plant->updateState(s, init_time + i * DT);\n    plant->runControlIteration(&is_alive);\n    plant->incrementTime();\n\n    EXPECT_EQ(plant->checkStatus(), 1);\n    EXPECT_EQ(plant->getStateTraj(), state_seq);\n    EXPECT_EQ(plant->getControlTraj(), control_seq);\n    // EXPECT_EQ(plant->getFeedbackState(), feedback);\n\n    // check last pose update\n    EXPECT_FLOAT_EQ(plant->getLastUsedPoseUpdateTime(), DT * i + init_time);\n    EXPECT_EQ(plant->getNumIter(), i + 1);\n    EXPECT_EQ(plant->getLastOptimizationStride(), expect_opt_stride);\n\n    EXPECT_THAT(plant->getOptimizationDuration(),\n                testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n    EXPECT_THAT(plant->getOptimizationAvg(),\n                testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n    EXPECT_THAT(plant->getOptimizeLoopDuration(),\n             
   testing::AllOf(testing::Ge(wait_ms * 2), testing::Le((wait_ms + SMALL_TIME_MS) * 2)));\n    EXPECT_THAT(plant->getLoopAvg(),\n                testing::AllOf(testing::Ge((wait_ms)*2), testing::Le((wait_ms + SMALL_TIME_MS) * 2)));\n    EXPECT_THAT(plant->getFeedbackDuration(),\n                testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n    EXPECT_THAT(plant->getFeedbackAvg(), testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n  }\n}\n\nTEST_F(BasePlantTest, runControlLoopRegular)\n{\n  EXPECT_CALL(mockFeedback, initTrackingController()).Times(1);\n  mockController->initFeedback();\n\n  int hz = plant->getHz();\n  double test_duration = 1.0;  // in seconds for how long to run the test\n\n  // setup mock expected calls\n  EXPECT_CALL(mockCost, setParams(testing::_)).Times(0);\n  EXPECT_CALL(mockDynamics, setParams(testing::_)).Times(0);\n  EXPECT_CALL(*mockController, resetControls()).Times(1);\n\n  double wait_s =\n      (1.0 / hz) / 3;  // divide by 3 since wait is evenly split across computeFeedback, computeControl, and waiting\n\n  auto wait_function = [wait_s](const Eigen::Ref<const MockController::state_array>& state,\n                                int optimization_stride = 0) { usleep(wait_s * 1e6); };\n  int iterations = int(std::round((hz * 1.0) / (test_duration)));  // number of times the method will be called\n\n  MockDynamics::control_array control = MockDynamics::control_array::Zero();\n  EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_))\n      .Times(46)\n      .WillRepeatedly(testing::Return(control));  // called by update state should not be called since time since last\n  // slide control sequence is skipped on the first iteration\n  EXPECT_CALL(*mockController, slideControlSequence(1)).Times(iterations / 2 - 1);\n  EXPECT_CALL(*mockController, computeControl(testing::_, testing::_))\n      .Times(iterations / 2)\n      
.WillRepeatedly(testing::Invoke(wait_function));\n  MockController::control_trajectory control_seq = MockController::control_trajectory::Zero();\n  EXPECT_CALL(*mockController, getControlSeq()).Times(iterations / 2).WillRepeatedly(testing::Return(control_seq));\n  MockController::state_trajectory state_seq = MockController::state_trajectory::Zero();\n  EXPECT_CALL(*mockController, getTargetStateSeq()).Times(iterations / 2).WillRepeatedly(testing::Return(state_seq));\n  EXPECT_CALL(*mockController, computeFeedback(testing::_))\n      .Times(iterations / 2)\n      .WillRepeatedly(testing::Invoke(wait_function));\n  MockController::TEMPLATED_FEEDBACK_STATE feedback;\n  EXPECT_CALL(*mockController, getFeedbackState()).Times(iterations / 2).WillRepeatedly(testing::Return(feedback));\n  EXPECT_CALL(*mockController, computeFeedbackPropagatedStateSeq()).Times(iterations / 2);\n  EXPECT_CALL(*mockController, calculateSampledStateTrajectories()).Times(0);\n\n  int init_time = 78;\n  plant->setLastTime(init_time);\n  MockDynamics::state_array state = MockDynamics::state_array::Zero();\n  plant->updateState(state, init_time);\n\n  std::atomic<bool> is_alive(true);\n  std::thread optimizer(&MockTestPlant::runControlLoop, plant.get(), &is_alive);\n\n  std::chrono::steady_clock::time_point loop_start = std::chrono::steady_clock::now();\n  std::chrono::duration<double, std::milli> loop_duration = std::chrono::steady_clock::now() - loop_start;\n  // counter is number of dts\n  for (int counter = 0; loop_duration.count() < test_duration * 1e3; counter++)\n  {\n    // wait until the correct hz has passed to tick the time\n    // state at 100 Hz\n    while (loop_duration.count() < (test_duration / 100) * 1e3 * counter)\n    {\n      usleep(50);\n      loop_duration = std::chrono::steady_clock::now() - loop_start;\n    }\n    if (counter / 5 > iterations / 2)\n    {  // this forces it to block\n      plant->incrementTime(0.01);\n      plant->updateState(state, plant->getPoseTime());\n  
  }\n  }\n  is_alive.store(false);\n  optimizer.join();\n\n  // check all the things\n  EXPECT_EQ(plant->checkStatus(), 1);\n  EXPECT_EQ(plant->getStateTraj(), state_seq);\n  EXPECT_EQ(plant->getControlTraj(), control_seq);\n  // EXPECT_EQ(plant->getFeedbackState(), feedback);\n\n  // check last pose update\n  EXPECT_NE(plant->getLastUsedPoseUpdateTime(), 0.0);\n  EXPECT_EQ(plant->getNumIter(), iterations / 2);\n  EXPECT_EQ(plant->getLastOptimizationStride(), 1);\n\n  double wait_ms = wait_s * 1e3;\n  EXPECT_THAT(plant->getOptimizationDuration(),\n              testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n  EXPECT_THAT(plant->getOptimizationAvg(), testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n  EXPECT_THAT(plant->getOptimizeLoopDuration(),\n              testing::AllOf(testing::Ge(wait_ms * 2), testing::Le(wait_ms * 2 + SMALL_TIME_MS)));\n  EXPECT_THAT(plant->getLoopAvg(), testing::AllOf(testing::Ge(wait_ms * 2), testing::Le(wait_ms * 2 + SMALL_TIME_MS)));\n  EXPECT_THAT(plant->getFeedbackDuration(), testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n  EXPECT_THAT(plant->getFeedbackAvg(), testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n  // 10 iters of just waiting, 10 iters of waiting for correct time\n  double expected_avg_wait = ((wait_ms * 3 * 10) + wait_ms * 10) / 10;\n  EXPECT_THAT(plant->getSleepTimeAvg(),\n              testing::AllOf(testing::Gt(expected_avg_wait), testing::Le(expected_avg_wait + SMALL_TIME_MS * 4)));\n}\n\nTEST_F(BasePlantTest, runControlLoopSlowed)\n{\n  mockController->initFeedback();\n\n  int hz = plant->getHz();\n  double test_duration = 1.0;  // in seconds for how long to run the test\n\n  MockTestPlant::COST_PARAMS_T cost_params;\n  MockTestPlant::DYN_PARAMS_T dyn_params;\n\n  double wait_s = (1.0 / hz);\n\n  auto wait_function = [wait_s](const Eigen::Ref<const MockController::state_array>& state,\n               
                 int optimization_stride = 0) { usleep(wait_s * 1e6); };\n\n  int iterations = int(std::round((hz * 1.0) / (test_duration)));  // number of times the method will be called\n\n  // setup mock expected calls\n  EXPECT_CALL(mockCost, setParams(testing::_)).Times(testing::AtLeast(54));  // 50 from waiting, 5 from run control\n                                                                             // iteration\n  EXPECT_CALL(mockDynamics, setParams(testing::_)).Times(testing::AtLeast(54));\n  EXPECT_CALL(*mockController, resetControls()).Times(1);\n\n  int expected_iters = iterations / 4 + 1;  // half do not count because of waiting, other half since we are slowed,\n                                            // plus one for init call\n  MockDynamics::control_array control = MockDynamics::control_array::Zero();\n  EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_))\n      .Times(46)\n      .WillRepeatedly(testing::Return(control));  // called by update state should not be called since time since last\n  // slide control sequence is skipped on the first iteration\n  EXPECT_CALL(*mockController, slideControlSequence(1)).Times(1);\n  EXPECT_CALL(*mockController, slideControlSequence(2)).Times(expected_iters - 2);\n  EXPECT_CALL(*mockController, computeControl(testing::_, testing::_))\n      .Times(expected_iters)\n      .WillRepeatedly(testing::Invoke(wait_function));\n  MockController::control_trajectory control_seq = MockController::control_trajectory::Zero();\n  EXPECT_CALL(*mockController, getControlSeq()).Times(expected_iters).WillRepeatedly(testing::Return(control_seq));\n  MockController::state_trajectory state_seq = MockController::state_trajectory::Zero();\n  EXPECT_CALL(*mockController, getTargetStateSeq()).Times(expected_iters).WillRepeatedly(testing::Return(state_seq));\n  EXPECT_CALL(*mockController, computeFeedback(testing::_))\n      .Times(expected_iters)\n      
.WillRepeatedly(testing::Invoke(wait_function));\n  MockController::TEMPLATED_FEEDBACK_STATE feedback;\n  EXPECT_CALL(*mockController, getFeedbackState()).Times(expected_iters).WillRepeatedly(testing::Return(feedback));\n  EXPECT_CALL(*mockController, computeFeedbackPropagatedStateSeq()).Times(expected_iters);\n  EXPECT_CALL(*mockController, calculateSampledStateTrajectories()).Times(0);\n\n  int init_time = 78;\n  plant->setLastTime(init_time);\n  MockDynamics::state_array state = MockDynamics::state_array::Zero();\n  plant->updateState(state, init_time);\n\n  std::atomic<bool> is_alive(true);\n  std::thread optimizer(&MockTestPlant::runControlLoop, plant.get(), &is_alive);\n\n  std::chrono::steady_clock::time_point loop_start = std::chrono::steady_clock::now();\n  std::chrono::duration<double, std::milli> loop_duration = std::chrono::steady_clock::now() - loop_start;\n  // counter is number of dts\n  for (int counter = 0; loop_duration.count() < test_duration * 1e3; counter++)\n  {\n    // wait until the correct hz has passed to tick the time\n    // state at 100 Hz\n    while (loop_duration.count() < (test_duration / 100) * 1e3 * counter)\n    {\n      usleep(50);\n      loop_duration = std::chrono::steady_clock::now() - loop_start;\n    }\n    if (counter / 5 > iterations / 2)\n    {  // this forces it to block\n      plant->incrementTime(0.01);\n      plant->updateState(state, plant->getPoseTime());\n    }\n\n    plant->setCostParams(cost_params);\n    plant->setDynamicsParams(dyn_params);\n  }\n  is_alive.store(false);\n  optimizer.join();\n\n  // check all the things\n  EXPECT_EQ(plant->checkStatus(), 1);\n  EXPECT_EQ(plant->getStateTraj(), state_seq);\n  EXPECT_EQ(plant->getControlTraj(), control_seq);\n  // EXPECT_EQ(plant->getFeedbackState(), feedback);\n\n  // check last pose update\n  EXPECT_NE(plant->getLastUsedPoseUpdateTime(), 0.0);\n  EXPECT_EQ(plant->getNumIter(), expected_iters);\n  EXPECT_EQ(plant->getLastOptimizationStride(), 2);\n\n  double 
wait_ms = wait_s * 1e3;\n  EXPECT_THAT(plant->getOptimizationDuration(),\n              testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n  EXPECT_THAT(plant->getOptimizationAvg(), testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n  EXPECT_THAT(plant->getOptimizeLoopDuration(),\n              testing::AllOf(testing::Ge(wait_ms * 2), testing::Le(wait_ms * 2 + SMALL_TIME_MS)));\n  EXPECT_THAT(plant->getLoopAvg(), testing::AllOf(testing::Ge(wait_ms * 2), testing::Le(wait_ms * 2 + SMALL_TIME_MS)));\n  EXPECT_THAT(plant->getFeedbackDuration(), testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n  EXPECT_THAT(plant->getFeedbackAvg(), testing::AllOf(testing::Ge(wait_ms), testing::Le(wait_ms + SMALL_TIME_MS)));\n  // 10 iters of just waiting\n  double expected_avg_wait = ((wait_ms * 2 * 10)) / 6;\n  // EXPECT_THAT(plant->getSleepTimeAvg(), testing::AllOf(testing::Gt(expected_avg_wait - SMALL_TIME_MS),\n  //                                                     testing::Le(expected_avg_wait + SMALL_TIME_MS * 4)));\n}\n"
  },
  {
    "path": "tests/mppi_core/buffered_plant_tester.cu",
    "content": "#include <gtest/gtest.h>\n#include <gmock/gmock.h>\n#include <mppi/utils/test_helper.h>\n#include <random>\n#include <algorithm>\n#include <numeric>\n\n#include <mppi/core/buffered_plant.hpp>\n#include <mppi/instantiations/cartpole_mppi/cartpole_mppi.cuh>\n#include <mppi_test/mock_classes/mock_dynamics.h>\n#include <mppi_test/mock_classes/mock_controller.h>\n#include <mppi_test/mock_classes/mock_costs.h>\n#include <mppi_test/mock_classes/mock_sampler.h>\n\nconst double precision = 1e-6;\nconst float DT = DT;\n\ntemplate <class CONTROLLER_T>\nclass TestPlant : public BufferedPlant<CONTROLLER_T>\n{\npublic:\n  double time_ = 0.0;\n\n  double avgDurationMs_ = 0;\n  double avgTickDuration_ = 0;\n  double avgSleepTime_ = 0;\n\n  using c_array = typename CONTROLLER_T::control_array;\n  using c_traj = typename CONTROLLER_T::control_trajectory;\n\n  using s_array = typename CONTROLLER_T::state_array;\n  using s_traj = typename CONTROLLER_T::state_trajectory;\n\n  using DYN_T = typename CONTROLLER_T::TEMPLATED_DYNAMICS;\n  using DYN_PARAMS_T = typename DYN_T::DYN_PARAMS_T;\n  using COST_T = typename CONTROLLER_T::TEMPLATED_COSTS;\n  using COST_PARAMS_T = typename COST_T::COST_PARAMS_T;\n  double timestamp_;\n  double loop_speed_;\n\n  using buffer_trajectory = typename BufferedPlant<CONTROLLER_T>::buffer_trajectory;\n\n  TestPlant(std::shared_ptr<MockController> controller, double buffer_time_horizon = 2.0, int hz = 20,\n            int opt_stride = 1)\n    : BufferedPlant<CONTROLLER_T>(controller, hz, opt_stride)\n  {\n    this->buffer_time_horizon_ = 2.0;\n    this->buffer_tau_ = 0.2;\n    this->buffer_dt_ = 0.02;\n    controller->setDt(this->buffer_dt_);\n  }\n\n  void pubControl(const c_array& u) override\n  {\n  }\n\n  void pubNominalState(const s_array& s) override\n  {\n  }\n\n  void pubFreeEnergyStatistics(MPPIFreeEnergyStatistics& fe_stats) override\n  {\n  }\n\n  void incrementTime()\n  {\n    time_ += DT;\n  }\n\n  int checkStatus() override\n  
{\n    return 1;\n  }\n\n  double getCurrentTime() override\n  {\n    auto current_time = std::chrono::system_clock::now();\n    auto duration_in_seconds = std::chrono::duration<double>(current_time.time_since_epoch());\n    return duration_in_seconds.count();\n  }\n\n  double getPoseTime() override\n  {\n    return time_;\n  }\n\n  void setStateTimeToPoseTime()\n  {\n    this->state_time_ = time_;\n  }\n\n  // accessors for protected members\n  std::list<BufferMessage<Eigen::Vector3f>> getPrevPositionList()\n  {\n    return this->buffer_.getPrevPositionList();\n  }\n  std::list<BufferMessage<Eigen::Quaternionf>> getPrevQuaternionList()\n  {\n    return this->buffer_.getPrevQuaternionList();\n  }\n  std::list<BufferMessage<Eigen::Vector3f>> getPrevVelocityList()\n  {\n    return this->buffer_.getPrevVelocityList();\n  }\n  std::list<BufferMessage<Eigen::Vector3f>> getPrevOmegaList()\n  {\n    return this->buffer_.getPrevOmegaList();\n  }\n  std::list<BufferMessage<c_array>> getPrevControlList()\n  {\n    return this->buffer_.getPrevControlList();\n  }\n  std::map<std::string, std::list<BufferMessage<float>>> getPrevExtraList()\n  {\n    return this->buffer_.getPrevExtraList();\n  }\n  double getBufferTimeHorizon()\n  {\n    return this->buffer_time_horizon_;\n  }\n  double getBufferTau()\n  {\n    return this->buffer_tau_;\n  }\n  void setLastUsedUpdateTime(double time)\n  {\n    this->last_used_state_update_time_ = time;\n  }\n};\n\ntypedef TestPlant<MockController> MockTestPlant;\n\nclass BufferedPlantTest : public ::testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    mockController = std::make_shared<MockController>();\n    // EXPECT_CALL(*mockController, getDt()).Times(1);\n    EXPECT_CALL(*mockController, getDt()).WillRepeatedly(testing::Return(DT));\n    mockController->cost_ = &mockCost;\n    mockController->model_ = &mockDynamics;\n    mockController->fb_controller_ = &mockFeedback;\n    mockController->sampler_ = &mockSamplingDistribution;\n\n  
  EXPECT_CALL(*mockController->cost_, getParams()).Times(1);\n    EXPECT_CALL(*mockController->model_, getParams()).Times(1);\n\n    EXPECT_CALL(mockDynamics, freeCudaMem()).Times(1);\n    EXPECT_CALL(mockCost, freeCudaMem()).Times(1);\n    EXPECT_CALL(mockSamplingDistribution, freeCudaMem()).Times(1);\n    EXPECT_CALL(mockFeedback, freeCudaMem()).Times(1);\n\n    plant = std::make_shared<MockTestPlant>(mockController);\n\n    generator = std::default_random_engine(7.0);\n    distribution = std::normal_distribution<float>(0.0, 100.0);\n  }\n\n  void TearDown() override\n  {\n    plant = nullptr;\n    mockController = nullptr;\n  }\n\n  MockSamplingDistribution mockSamplingDistribution;\n  MockDynamics mockDynamics;\n  MockCost mockCost;\n  MockFeedback mockFeedback;\n  std::shared_ptr<MockController> mockController;\n  std::shared_ptr<MockTestPlant> plant;\n\n  std::default_random_engine generator;\n  std::normal_distribution<float> distribution;\n};\n\nTEST_F(BufferedPlantTest, Constructor)\n{\n  auto prev_pos = plant->getPrevPositionList();\n  EXPECT_EQ(prev_pos.size(), 0);\n  auto prev_quat = plant->getPrevQuaternionList();\n  EXPECT_EQ(prev_quat.size(), 0);\n  auto prev_vel = plant->getPrevVelocityList();\n  EXPECT_EQ(prev_vel.size(), 0);\n  auto prev_omega = plant->getPrevOmegaList();\n  EXPECT_EQ(prev_omega.size(), 0);\n  auto prev_control = plant->getPrevControlList();\n  EXPECT_EQ(prev_control.size(), 0);\n  auto prev_extra = plant->getPrevExtraList();\n  EXPECT_EQ(prev_extra.size(), 0);\n\n  EXPECT_FLOAT_EQ(plant->getBufferTimeHorizon(), 2.0);\n  EXPECT_FLOAT_EQ(plant->getBufferTau(), 0.2);\n  EXPECT_FLOAT_EQ(plant->getBufferDt(), 0.02);\n}\n\nTEST_F(BufferedPlantTest, setBufferDt)\n{\n  double new_buffer_dt = 30.0;\n  plant->setBufferDt(new_buffer_dt);\n  EXPECT_FLOAT_EQ(plant->getBufferDt(), new_buffer_dt);\n}\n\nTEST_F(BufferedPlantTest, interpNew)\n{\n  Eigen::Vector3f pos = Eigen::Vector3f::Ones();\n  Eigen::Quaternionf quat = 
Eigen::Quaternionf::Identity();\n  Eigen::Vector3f vel = Eigen::Vector3f::Random();\n  Eigen::Vector3f omega = Eigen::Vector3f::Random();\n\n  MockDynamics::state_array state = MockDynamics::state_array::Random();\n\n  EXPECT_CALL(mockDynamics, stateFromMap(testing::_)).Times(2).WillRepeatedly(testing::Return(state));\n  // Controls never calculated so no calls to controller in updateState()\n  EXPECT_CALL(*mockController, getDt()).Times(0);\n  EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_)).Times(0);\n\n  plant->setLastUsedUpdateTime(0);\n  plant->updateOdometry(pos, quat, vel, omega, 0.0);\n  plant->updateOdometry(pos, quat, vel, omega, 1.0);\n\n  std::map<std::string, float> result_state = plant->getInterpState(10);\n  EXPECT_FLOAT_EQ(result_state[\"OMEGA_X\"], omega.x());\n  EXPECT_FLOAT_EQ(result_state[\"OMEGA_Y\"], omega.y());\n  EXPECT_FLOAT_EQ(result_state[\"OMEGA_Z\"], omega.z());\n}\n\nTEST_F(BufferedPlantTest, updateControls)\n{\n  MockDynamics::control_array u = MockDynamics::control_array::Zero();\n  auto prev_control = plant->getPrevControlList();\n\n  for (int i = 0; i < 20; i++)\n  {\n    u = MockDynamics::control_array::Ones() * 0.2 * i;\n    plant->updateControls(u, 0.2 * i);\n    prev_control = plant->getPrevControlList();\n    EXPECT_EQ(prev_control.size(), i + 1);\n  }\n  for (int i = 0; i < 20; i++)\n  {\n    u = MockDynamics::control_array::Ones() * (0.2 * i + 0.1);\n    plant->updateControls(u, 0.2 * i + 0.1);\n    prev_control = plant->getPrevControlList();\n    EXPECT_EQ(prev_control.size(), i + 21);\n  }\n  prev_control = plant->getPrevControlList();\n  EXPECT_EQ(prev_control.size(), 40);\n\n  double time = 0;\n  for (auto it = prev_control.begin(); it != prev_control.end(); it++, time += 0.1)\n  {\n    EXPECT_FLOAT_EQ(it->time, time);\n    for (int i = 0; i < MockDynamics::CONTROL_DIM; i++)\n    {\n      EXPECT_FLOAT_EQ(it->data(i), time);\n    }\n  }\n\n  plant->time_ = 4.0;\n  
plant->setStateTimeToPoseTime();\n  plant->updateParameters();\n  prev_control = plant->getPrevControlList();\n  EXPECT_EQ(prev_control.size(), 20);\n}\n\nTEST_F(BufferedPlantTest, updateControlsRandom)\n{\n  MockDynamics::control_array u = MockDynamics::control_array::Random();\n  auto prev_control = plant->getPrevControlList();\n\n  for (int i = 0; i < 1000; i++)\n  {\n    plant->updateControls(u, distribution(generator));\n  }\n  prev_control = plant->getPrevControlList();\n  EXPECT_EQ(prev_control.size(), 1000);\n\n  std::vector<double> times(1000);\n\n  int index = 0;\n  for (auto it = prev_control.begin(); it != prev_control.end(); it++, index++)\n  {\n    times[index] = it->time;\n  }\n\n  EXPECT_TRUE(std::is_sorted(times.begin(), times.end()));\n}\n\n// TEST_F(BufferedPlantTest, updateControlsInterp)\n// {\n//   MockDynamics::control_array u = MockDynamics::control_array::Zero();\n//   std::list<BufferMessage<MockDynamics::control_array>> prev_control = plant->getPrevControlList();\n//\n//   for (int i = 0; i < 21; i++)\n//   {\n//     u = MockDynamics::control_array::Ones() * 0.2 * i;\n//     plant->updateControls(u, 0.2 * i);\n//     prev_control = plant->getPrevControlList();\n//     EXPECT_EQ(prev_control.size(), i + 1);\n//   }\n//   prev_control = plant->getPrevControlList();\n//   EXPECT_EQ(prev_control.size(), 21);\n//\n//   for (double t = -2.0; t < 6.0; t += 0.01)\n//   {\n//     MockDynamics::control_array u_interp = MockTestPlant::interp<MockDynamics::control_array>(prev_control, t);\n//     if (t < 0)\n//     {\n//       for (int i = 0; i < MockDynamics::CONTROL_DIM; i++)\n//       {\n//         EXPECT_NEAR(u_interp(i), 0, precision) << \"at time \" << t;\n//       }\n//     }\n//     else if (t > 4.0)\n//     {\n//       for (int i = 0; i < MockDynamics::CONTROL_DIM; i++)\n//       {\n//         EXPECT_NEAR(u_interp(i), 4.0, precision) << \"at time \" << t;\n//       }\n//     }\n//     else\n//     {\n//       for (int i = 0; i < 
MockDynamics::CONTROL_DIM; i++)\n//       {\n//         EXPECT_NEAR(u_interp(i), t, precision) << \"at time \" << t;\n//       }\n//     }\n//   }\n// }\n\nTEST_F(BufferedPlantTest, extraValues)\n{\n  auto extra_info = plant->getPrevExtraList();\n  for (int i = 0; i < 20; i++)\n  {\n    plant->updateExtraValue(\"steering_angle\", 0.2 * i, 0.2 * i);\n    plant->updateExtraValue(\"steering_vel\", 0.2 * i, 0.2 * i);\n    extra_info = plant->getPrevExtraList();\n    EXPECT_EQ(extra_info.size(), 2);\n    EXPECT_EQ(extra_info[\"steering_angle\"].size(), i + 1);\n    EXPECT_EQ(extra_info[\"steering_vel\"].size(), i + 1);\n  }\n  for (int i = 0; i < 20; i++)\n  {\n    plant->updateExtraValue(\"steering_angle\", 0.2 * i + 0.1, 0.2 * i + 0.1);\n    plant->updateExtraValue(\"steering_vel\", 0.2 * i + 0.1, 0.2 * i + 0.1);\n    extra_info = plant->getPrevExtraList();\n    EXPECT_EQ(extra_info.size(), 2);\n    EXPECT_EQ(extra_info[\"steering_angle\"].size(), i + 21);\n    EXPECT_EQ(extra_info[\"steering_vel\"].size(), i + 21);\n  }\n  extra_info = plant->getPrevExtraList();\n  EXPECT_EQ(extra_info.size(), 2);\n  EXPECT_EQ(extra_info[\"steering_angle\"].size(), 40);\n  EXPECT_EQ(extra_info[\"steering_vel\"].size(), 40);\n\n  for (auto list_it = extra_info.begin(); list_it != extra_info.end(); list_it++)\n  {\n    double time = 0;\n    for (auto it = list_it->second.begin(); it != list_it->second.end(); it++, time += 0.1)\n    {\n      EXPECT_FLOAT_EQ(it->time, time);\n      for (int i = 0; i < MockDynamics::CONTROL_DIM; i++)\n      {\n        EXPECT_FLOAT_EQ(it->data, time);\n      }\n    }\n  }\n\n  plant->cleanBuffers(4.0);\n  extra_info = plant->getPrevExtraList();\n  EXPECT_EQ(extra_info.size(), 2);\n  EXPECT_EQ(extra_info[\"steering_angle\"].size(), 20);\n  EXPECT_EQ(extra_info[\"steering_vel\"].size(), 20);\n}\n\nTEST_F(BufferedPlantTest, updateOdometry)\n{\n  Eigen::Vector3f pos = Eigen::Vector3f::Ones();\n  Eigen::Quaternionf quat = Eigen::Quaternionf::Identity();\n  
Eigen::Vector3f vel = Eigen::Vector3f::Ones();\n  Eigen::Vector3f omega = Eigen::Vector3f::Ones();\n\n  MockDynamics::state_array state = MockDynamics::state_array::Random();\n\n  EXPECT_CALL(mockDynamics, stateFromMap(testing::_)).Times(2).WillRepeatedly(testing::Return(state));\n  // Controls never calculated so no calls to controller in updateState()\n  EXPECT_CALL(*mockController, getDt()).Times(0);\n  EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_)).Times(0);\n\n  plant->setLastUsedUpdateTime(0.0);\n  plant->updateOdometry(pos, quat, vel, omega, 0.0);\n\n  auto prev_pos = plant->getPrevPositionList();\n  auto prev_quat = plant->getPrevQuaternionList();\n  auto prev_vel = plant->getPrevVelocityList();\n  auto prev_omega = plant->getPrevOmegaList();\n\n  EXPECT_EQ(prev_pos.size(), 1);\n  EXPECT_EQ(prev_quat.size(), 1);\n  EXPECT_EQ(prev_vel.size(), 1);\n  EXPECT_EQ(prev_omega.size(), 1);\n  MockDynamics::state_array result_state = plant->getState();\n  EXPECT_LT((state - result_state).norm(), 1e-8);\n\n  pos = Eigen::Vector3f::Ones() * 3;\n  vel = Eigen::Vector3f::Ones() * 4;\n  omega = Eigen::Vector3f::Ones() * 5;\n  quat = Eigen::AngleAxisf(M_PI_2, Eigen::Vector3f::UnitZ());\n\n  plant->updateOdometry(pos, quat, vel, omega, 1.0);\n\n  prev_pos = plant->getPrevPositionList();\n  prev_quat = plant->getPrevQuaternionList();\n  prev_vel = plant->getPrevVelocityList();\n  prev_omega = plant->getPrevOmegaList();\n\n  EXPECT_EQ(prev_pos.size(), 2);\n  EXPECT_EQ(prev_quat.size(), 2);\n  EXPECT_EQ(prev_vel.size(), 2);\n  EXPECT_EQ(prev_omega.size(), 2);\n  result_state = plant->getState();\n  EXPECT_LT((state - result_state).norm(), 1e-8);\n\n  std::map<std::string, float> interp = plant->getInterpState(0.5);\n\n  EXPECT_FLOAT_EQ(interp.at(\"POS_X\"), 2);\n  EXPECT_FLOAT_EQ(interp.at(\"POS_Y\"), 2);\n  EXPECT_FLOAT_EQ(interp.at(\"POS_Z\"), 2);\n\n  EXPECT_FLOAT_EQ(interp.at(\"Q_W\"), 0.92387962);\n  
EXPECT_FLOAT_EQ(interp.at(\"Q_X\"), 0.0);\n  EXPECT_FLOAT_EQ(interp.at(\"Q_Y\"), 0.0);\n  EXPECT_FLOAT_EQ(interp.at(\"Q_Z\"), 0.3826834);\n\n  EXPECT_FLOAT_EQ(interp.at(\"VEL_X\"), 2.5);\n  EXPECT_FLOAT_EQ(interp.at(\"VEL_Y\"), 2.5);\n  EXPECT_FLOAT_EQ(interp.at(\"VEL_Z\"), 2.5);\n\n  EXPECT_FLOAT_EQ(interp.at(\"OMEGA_X\"), 3);\n  EXPECT_FLOAT_EQ(interp.at(\"OMEGA_Y\"), 3);\n  EXPECT_FLOAT_EQ(interp.at(\"OMEGA_Z\"), 3);\n}\n\nTEST_F(BufferedPlantTest, getInterpState)\n{\n  Eigen::Vector3f pos = Eigen::Vector3f::Ones();\n  Eigen::Quaternionf quat = Eigen::Quaternionf::Identity();\n  Eigen::Vector3f vel = Eigen::Vector3f::Ones();\n  Eigen::Vector3f omega = Eigen::Vector3f::Ones();\n  MockDynamics::control_array u = MockDynamics::control_array::Ones();\n\n  MockDynamics::state_array state = MockDynamics::state_array::Random();\n\n  plant->setLastUsedUpdateTime(0.0);\n  EXPECT_CALL(mockDynamics, stateFromMap(testing::_)).Times(2).WillRepeatedly(testing::Return(state));\n  // Controls never calculated so no calls to controller in updateState()\n  EXPECT_CALL(*mockController, getDt()).Times(0);\n  EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_)).Times(0);\n\n  plant->updateOdometry(pos, quat, vel, omega, 0.0);\n  plant->updateControls(u, 0.0);\n  plant->updateExtraValue(\"steering_angle\", 1, 0);\n  plant->updateExtraValue(\"steering_vel\", 1, 0);\n\n  pos = Eigen::Vector3f::Ones() * 2;\n  vel = Eigen::Vector3f::Ones() * 2;\n  omega = Eigen::Vector3f::Ones() * 2;\n  quat = Eigen::AngleAxisf(M_PI_2f32, Eigen::Vector3f::UnitZ());\n  u = MockDynamics::control_array::Ones() * 2;\n\n  plant->updateOdometry(pos, quat, vel, omega, 1.0);\n  plant->updateControls(u, 1.0);\n  plant->updateExtraValue(\"steering_angle\", 2, 1.0);\n  plant->updateExtraValue(\"steering_vel\", 2, 1.0);\n\n  std::map<std::string, float> map = plant->getInterpState(0.5);\n\n  EXPECT_EQ(map.size(), 19);\n\n  EXPECT_FLOAT_EQ(map.at(\"POS_X\"), 
1.5);\n  EXPECT_FLOAT_EQ(map.at(\"POS_Y\"), 1.5);\n  EXPECT_FLOAT_EQ(map.at(\"POS_Z\"), 1.5);\n\n  EXPECT_FLOAT_EQ(map.at(\"Q_W\"), 0.92387962);\n  EXPECT_FLOAT_EQ(map.at(\"Q_X\"), 0.0);\n  EXPECT_FLOAT_EQ(map.at(\"Q_Y\"), 0.0);\n  EXPECT_FLOAT_EQ(map.at(\"Q_Z\"), 0.3826834);\n\n  EXPECT_FLOAT_EQ(map.at(\"ROLL\"), 0);\n  EXPECT_FLOAT_EQ(map.at(\"PITCH\"), 0);\n  EXPECT_FLOAT_EQ(map.at(\"YAW\"), M_PI_4f32);\n\n  EXPECT_FLOAT_EQ(map.at(\"VEL_X\"), 1.5);\n  EXPECT_FLOAT_EQ(map.at(\"VEL_Y\"), 1.5);\n  EXPECT_FLOAT_EQ(map.at(\"VEL_Z\"), 1.5);\n\n  EXPECT_FLOAT_EQ(map.at(\"OMEGA_X\"), 1.5);\n  EXPECT_FLOAT_EQ(map.at(\"OMEGA_Y\"), 1.5);\n  EXPECT_FLOAT_EQ(map.at(\"OMEGA_Z\"), 1.5);\n\n  EXPECT_FLOAT_EQ(map.at(\"CONTROL_0\"), 1.5);\n\n  EXPECT_FLOAT_EQ(map.at(\"steering_angle\"), 1.5);\n  EXPECT_FLOAT_EQ(map.at(\"steering_vel\"), 1.5);\n}\n\nTEST_F(BufferedPlantTest, getInterpBuffer)\n{\n  Eigen::Vector3f pos = Eigen::Vector3f::Zero();\n  Eigen::Quaternionf quat = Eigen::Quaternionf::Identity();\n  Eigen::Vector3f vel = Eigen::Vector3f::Zero();\n  Eigen::Vector3f omega = Eigen::Vector3f::Zero();\n  MockDynamics::control_array u = MockDynamics::control_array::Zero();\n\n  MockDynamics::state_array state = MockDynamics::state_array::Random();\n\n  plant->setLastUsedUpdateTime(0.0);\n  EXPECT_CALL(mockDynamics, stateFromMap(testing::_)).Times(2).WillRepeatedly(testing::Return(state));\n  // Controls never calculated so no calls to controller in updateState()\n  EXPECT_CALL(*mockController, getDt()).Times(0);\n  EXPECT_CALL(*mockController, getCurrentControl(testing::_, testing::_, testing::_, testing::_, testing::_)).Times(0);\n\n  plant->updateOdometry(pos, quat, vel, omega, 0.0);\n  plant->updateControls(u, 0.0);\n  plant->updateExtraValue(\"steering_angle\", 0, 0);\n  plant->updateExtraValue(\"steering_vel\", 0, 0);\n\n  pos = Eigen::Vector3f::Ones();\n  vel = Eigen::Vector3f::Ones();\n  omega = Eigen::Vector3f::Ones();\n  quat = Eigen::AngleAxisf(M_PI_2, 
Eigen::Vector3f::UnitZ());\n  u = MockDynamics::control_array::Ones();\n\n  plant->updateOdometry(pos, quat, vel, omega, 1.0);\n  plant->updateControls(u, 1.0);\n  plant->updateExtraValue(\"steering_angle\", 1, 1.0);\n  plant->updateExtraValue(\"steering_vel\", 1, 1.0);\n\n  MockTestPlant::buffer_trajectory buffer = plant->getSmoothedBuffer(1.0);\n\n  EXPECT_EQ(buffer.size(), 19);\n  EXPECT_EQ(buffer.at(\"POS_X\").size(), 11);\n\n  for (int t = 0; t < 11; t++)\n  {\n    double time = 0.8 + t * 0.02;\n    EXPECT_FLOAT_EQ(buffer.at(\"POS_X\")(t), time) << \"at time \" << t << \" \" << time;\n    EXPECT_FLOAT_EQ(buffer.at(\"POS_Y\")(t), time) << \"at time \" << t << \" \" << time;\n    EXPECT_FLOAT_EQ(buffer.at(\"POS_Z\")(t), time) << \"at time \" << t << \" \" << time;\n\n    EXPECT_FLOAT_EQ(buffer.at(\"VEL_X\")(t), time) << \"at time \" << t << \" \" << time;\n    EXPECT_FLOAT_EQ(buffer.at(\"VEL_Y\")(t), time) << \"at time \" << t << \" \" << time;\n    EXPECT_FLOAT_EQ(buffer.at(\"VEL_Z\")(t), time) << \"at time \" << t << \" \" << time;\n\n    EXPECT_FLOAT_EQ(buffer.at(\"OMEGA_X\")(t), time) << \"at time \" << t << \" \" << time;\n    EXPECT_FLOAT_EQ(buffer.at(\"OMEGA_Y\")(t), time) << \"at time \" << t << \" \" << time;\n    EXPECT_FLOAT_EQ(buffer.at(\"OMEGA_Z\")(t), time) << \"at time \" << t << \" \" << time;\n\n    EXPECT_FLOAT_EQ(buffer.at(\"ROLL\")(t), 0);\n    EXPECT_FLOAT_EQ(buffer.at(\"PITCH\")(t), 0);\n\n    EXPECT_FLOAT_EQ(buffer.at(\"CONTROL_0\")(t), time) << \"at time \" << t << \" \" << time;\n    EXPECT_NEAR(buffer.at(\"steering_angle\")(t), time, precision) << \"at time \" << t << \" \" << time;\n    EXPECT_NEAR(buffer.at(\"steering_vel\")(t), time, precision) << \"at time \" << t << \" \" << time;\n  }\n}\n"
  },
  {
    "path": "tests/mppi_core/normexp_kernel_tests.cu",
    "content": "#include <gtest/gtest.h>\n#include <kernel_tests/core/normexp_kernel_test.cuh>\n#include <mppi/utils/test_helper.h>\n\n#include <algorithm>\n#include <chrono>\n#include <numeric>\n#include <random>\n\nclass NormExpKernel : public testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    generator = std::default_random_engine(7.0);\n    distribution = std::normal_distribution<float>(100.0, 2.0);\n  }\n\n  void TearDown() override\n  {\n  }\n\n  std::default_random_engine generator;\n  std::normal_distribution<float> distribution;\n};\n\ntemplate <int NUM_ROLLOUTS>\n__global__ void computeNormalizerKernel(const float* __restrict__ costs, float* __restrict__ output)\n{\n  __shared__ float reduction_buffer[NUM_ROLLOUTS];\n  int global_idx = threadIdx.x;\n  int global_step = blockDim.x;\n  *output = mppi::kernels::computeNormalizer(NUM_ROLLOUTS, costs, reduction_buffer, global_idx, global_step);\n};\n\ntemplate <int NUM_ROLLOUTS>\n__global__ void computeBaselineCostKernel(const float* __restrict__ costs, float* __restrict__ output)\n{\n  __shared__ float reduction_buffer[NUM_ROLLOUTS];\n  int global_idx = threadIdx.x;\n  int global_step = blockDim.x;\n  *output = mppi::kernels::computeBaselineCost(NUM_ROLLOUTS, costs, reduction_buffer, global_idx, global_step);\n};\n\nTEST_F(NormExpKernel, computeBaselineCost_Test)\n{\n  const int num_rollouts = 4196;\n  std::array<float, num_rollouts> cost_vec = { 0 };\n\n  // Use a range based for loop to set the cost\n  for (auto& cost : cost_vec)\n  {\n    cost = distribution(generator);\n  }\n\n  float min_cost_known = *std::min_element(cost_vec.begin(), cost_vec.end());\n  float min_cost_compute = mppi::kernels::computeBaselineCost(cost_vec.data(), num_rollouts);\n\n  ASSERT_FLOAT_EQ(min_cost_compute, min_cost_known);\n}\n\nTEST_F(NormExpKernel, computeNormalizer_Test)\n{\n  const int num_rollouts = 1024;\n  std::array<float, num_rollouts> cost_vec = { 0 };\n\n  // Use a range based for loop to set the cost\n 
 for (auto& cost : cost_vec)\n  {\n    cost = distribution(generator);\n  }\n\n  float sum_cost_known = std::accumulate(cost_vec.begin(), cost_vec.end(), 0.0);\n  float sum_cost_compute = mppi::kernels::computeNormalizer(cost_vec.data(), num_rollouts);\n\n  ASSERT_FLOAT_EQ(sum_cost_compute, sum_cost_known);\n}\n\nTEST_F(NormExpKernel, computeNormalizerDevice_Test)\n{\n  const int num_rollouts = 6048;\n  std::array<float, num_rollouts> cost_vec = { 0 };\n\n  // Use a range based for loop to set the cost\n  for (int i = 0; i < cost_vec.size(); i++)\n  {\n    cost_vec[i] = distribution(generator);\n  }\n  float* norm_d;\n  float* costs_d;\n  float sum_cost_compute;\n  HANDLE_ERROR(cudaMalloc((void**)&norm_d, sizeof(float)));\n  HANDLE_ERROR(cudaMalloc((void**)&costs_d, sizeof(float) * num_rollouts));\n  HANDLE_ERROR(cudaMemcpy(costs_d, cost_vec.data(), sizeof(float) * num_rollouts, cudaMemcpyHostToDevice));\n  computeNormalizerKernel<num_rollouts><<<1, 1024>>>(costs_d, norm_d);\n  HANDLE_ERROR(cudaMemcpy(&sum_cost_compute, norm_d, sizeof(float), cudaMemcpyDeviceToHost));\n\n  float sum_cost_known = std::accumulate(cost_vec.begin(), cost_vec.end(), 0.0);\n  ASSERT_FLOAT_EQ(sum_cost_compute, sum_cost_known);\n}\n\nTEST_F(NormExpKernel, computeBaselineCostDevice_Test)\n{\n  const int num_rollouts = 6048;\n  std::array<float, num_rollouts> cost_vec = { 0 };\n\n  // Use a range based for loop to set the cost\n  for (int i = 0; i < cost_vec.size(); i++)\n  {\n    cost_vec[i] = cost_vec.size() - i;\n  }\n  std::cout << std::endl;\n  float* norm_d;\n  float* costs_d;\n  float sum_cost_compute;\n  HANDLE_ERROR(cudaMalloc((void**)&norm_d, sizeof(float)));\n  HANDLE_ERROR(cudaMalloc((void**)&costs_d, sizeof(float) * num_rollouts));\n  HANDLE_ERROR(cudaMemcpy(costs_d, cost_vec.data(), sizeof(float) * num_rollouts, cudaMemcpyHostToDevice));\n  computeBaselineCostKernel<num_rollouts><<<1, 1024>>>(costs_d, norm_d);\n  HANDLE_ERROR(cudaMemcpy(&sum_cost_compute, norm_d, sizeof(float), 
cudaMemcpyDeviceToHost));\n\n  float sum_cost_known = *std::min_element(cost_vec.begin(), cost_vec.end());\n  ASSERT_FLOAT_EQ(sum_cost_compute, sum_cost_known);\n}\n\nTEST_F(NormExpKernel, computeExpNorm_Test)\n{\n  const int num_rollouts = 555;\n  std::array<float, num_rollouts> cost_vec = { 0 };\n  std::array<float, num_rollouts> normalized_compute = { 0 };\n  std::array<float, num_rollouts> normalized_known = { 0 };\n  float gamma = 0.3;\n\n  // Use a range based for loop to set the cost\n  for (auto& cost : cost_vec)\n  {\n    cost = distribution(generator);\n  }\n\n  float baseline = *std::min_element(cost_vec.begin(), cost_vec.end());\n\n  for (int i = 0; i < num_rollouts; i++)\n  {\n    normalized_known[i] = expf(-gamma * (cost_vec[i] - baseline));\n  }\n\n  launchNormExp_KernelTest<num_rollouts>(cost_vec, gamma, baseline, normalized_compute);\n\n  array_assert_float_eq<num_rollouts>(normalized_compute, normalized_known);\n}\n\nTEST_F(NormExpKernel, comparisonTestAutorallyMPPI_Generic)\n{\n  const int num_rollouts = 28754;\n  const int blocksize_x = 8;\n  const int blocksize_y = 8;\n  std::array<float, num_rollouts> cost_vec = { 0 };\n  std::array<float, num_rollouts> normalized_autorally = { 0 };\n  std::array<float, num_rollouts> normalized_generic = { 0 };\n  float gamma = 0.3;\n\n  // Use a range based for loop to set the cost\n  for (auto& cost : cost_vec)\n  {\n    cost = distribution(generator);\n  }\n\n  float baseline = *std::min_element(cost_vec.begin(), cost_vec.end());\n\n  launchGenericNormExpKernelTest<num_rollouts, blocksize_x>(cost_vec, gamma, baseline, normalized_generic);\n\n  for (int i = 0; i < num_rollouts; i++)\n  {\n    float cost = cost_vec[i] - baseline;\n    cost = expf(-gamma * cost);\n    EXPECT_FLOAT_EQ(normalized_generic[i], cost);\n  }\n}\n\nTEST_F(NormExpKernel, comparisonTestHostvsDeviceBaselineNormalizerCalculation)\n{\n  const int num_rollouts = 10000;\n  const int blocksize_x = 8;\n  const int num_iterations = 2500;\n  
std::array<float, num_rollouts> cost_vec = { 0 };\n  std::array<float, num_rollouts> host_dev_costs = { 0 };\n  std::array<float, num_rollouts> dev_only_costs = { 0 };\n  float lambda = 0.3;\n  double old_method_ms = 0;\n  double new_method_ms = 0;\n  cudaStream_t stream;\n  cudaStreamCreate(&stream);\n\n  float* costs_dev_only_d;\n  float* costs_host_only_d;\n  float2* baseline_and_normalizer_d;\n  float2 host_components, device_components;\n  HANDLE_ERROR(cudaMalloc((void**)&baseline_and_normalizer_d, sizeof(float2)));\n  HANDLE_ERROR(cudaMalloc((void**)&costs_dev_only_d, sizeof(float) * num_rollouts));\n  HANDLE_ERROR(cudaMalloc((void**)&costs_host_only_d, sizeof(float) * num_rollouts));\n\n  // Use a range based for loop to set the cost\n  for (int i = 0; i < num_iterations; i++)\n  {\n    for (auto& cost : cost_vec)\n    {\n      cost = distribution(generator);\n    }\n\n    /**\n     * @brief Prep CUDA components\n     *\n     */\n    HANDLE_ERROR(cudaMemcpyAsync(costs_dev_only_d, cost_vec.data(), sizeof(float) * num_rollouts,\n                                 cudaMemcpyHostToDevice, stream));\n    HANDLE_ERROR(cudaMemcpyAsync(costs_host_only_d, cost_vec.data(), sizeof(float) * num_rollouts,\n                                 cudaMemcpyHostToDevice, stream));\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n    auto start_old_method_t = std::chrono::steady_clock::now();\n    // Run old method to transform costs\n    HANDLE_ERROR(cudaMemcpyAsync(host_dev_costs.data(), costs_host_only_d, num_rollouts * sizeof(float),\n                                 cudaMemcpyDeviceToHost, stream));\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n    host_components.x = mppi::kernels::computeBaselineCost(host_dev_costs.data(), num_rollouts);\n    mppi::kernels::launchNormExpKernel(num_rollouts, blocksize_x, costs_host_only_d, 1.0 / lambda, host_components.x,\n                                       stream, false);\n    HANDLE_ERROR(cudaMemcpyAsync(host_dev_costs.data(), 
costs_host_only_d, num_rollouts * sizeof(float),\n                                 cudaMemcpyDeviceToHost, stream));\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n    host_components.y = mppi::kernels::computeNormalizer(host_dev_costs.data(), num_rollouts);\n    old_method_ms += (std::chrono::steady_clock::now() - start_old_method_t).count() / 1e6;\n\n    auto start_new_method_t = std::chrono::steady_clock::now();\n    // Run new method to transform costs\n    mppi::kernels::launchWeightTransformKernel<num_rollouts>(costs_dev_only_d, baseline_and_normalizer_d, 1.0 / lambda,\n                                                             1, stream, false);\n    HANDLE_ERROR(cudaMemcpyAsync(dev_only_costs.data(), costs_dev_only_d, num_rollouts * sizeof(float),\n                                 cudaMemcpyDeviceToHost, stream));\n    HANDLE_ERROR(\n        cudaMemcpyAsync(&device_components, baseline_and_normalizer_d, sizeof(float2), cudaMemcpyDeviceToHost, stream));\n    HANDLE_ERROR(cudaStreamSynchronize(stream));\n    new_method_ms += (std::chrono::steady_clock::now() - start_new_method_t).count() / 1e6;\n  }\n\n  std::cout << \"Old method averaged \" << old_method_ms / num_iterations << \" ms and the new method averaged \"\n            << new_method_ms / num_iterations << \" ms\" << std::endl;\n\n  for (int i = 0; i < num_rollouts; i++)\n  {\n    ASSERT_FLOAT_EQ(dev_only_costs[i], host_dev_costs[i]);\n  }\n  ASSERT_FLOAT_EQ(device_components.x, host_components.x);\n  ASSERT_FLOAT_EQ(device_components.y, host_components.y);\n}\n"
  },
  {
    "path": "tests/mppi_core/rmppi_kernel_tests.cu",
    "content": "//\n// Created by mgandhi on 5/23/20.\n//\n#include <gtest/gtest.h>\n#include <kernel_tests/core/rmppi_kernel_test.cuh>\n#include <mppi/controllers/MPPI/mppi_controller.cuh>\n#include <mppi/core/rmppi_kernels.cuh>\n#include <mppi/cost_functions/double_integrator/double_integrator_circle_cost.cuh>\n#include <mppi/cost_functions/quadratic_cost/quadratic_cost.cuh>\n#include <mppi/dynamics/double_integrator/di_dynamics.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n#include <mppi/utils/test_helper.h>\n#include <mppi/utils/logger.hpp>\n\n#include <iostream>\n#include <vector>\n\nclass RMPPIKernels : public ::testing::Test\n{\npublic:\n  static const int num_timesteps = 50;\n  static const int nominal_idx = 0;\n  static const int real_idx = 1;\n  using DYN_T = DoubleIntegratorDynamics;\n  using COST_T = DoubleIntegratorCircleCost;\n  // using COST_T = QuadraticCost<DYN_T>;\n  using SAMPLER_T = mppi::sampling_distributions::GaussianDistribution<typename DYN_T::DYN_PARAMS_T>;\n  using FB_T = DDPFeedback<DYN_T, num_timesteps>;\n  using control_trajectory = Eigen::Matrix<float, DYN_T::CONTROL_DIM, num_timesteps>;\n  using state_array = DYN_T::state_array;\n  using output_array = DYN_T::output_array;\n  using control_array = DYN_T::control_array;\n\n  float dt = 0.01;\n  float lambda = 1.1f;\n  float alpha = 0.0;\n  float std_dev = 2.0f;\n  float value_func_threshold = 20;\n  float* initial_x_d = nullptr;\n  float* cost_trajectories_d = nullptr;\n  curandGenerator_t gen;\n  cudaStream_t stream;\n  mppi::util::MPPILoggerPtr logger = nullptr;\n\n  void SetUp() override\n  {\n    model = new DYN_T(10);  // Initialize the double integrator DYN_T\n    cost = new COST_T;      // Initialize the cost function\n    auto sampler_params = SAMPLER_T::SAMPLING_PARAMS_T();\n    for (int i = 0; i < DYN_T::CONTROL_DIM; i++)\n    {\n      sampler_params.std_dev[i] = std_dev;\n    }\n    sampler_params.num_timesteps = num_timesteps;\n    sampler = new 
SAMPLER_T(sampler_params);\n    fb_controller = new FB_T(model, dt);\n\n    HANDLE_CURAND_ERROR(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));\n    unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();\n    HANDLE_CURAND_ERROR(curandSetPseudoRandomGeneratorSeed(gen, seed));\n\n    logger = std::make_shared<mppi::util::MPPILogger>();\n    model->setLogger(logger);\n    cost->setLogger(logger);\n    sampler->setLogger(logger);\n    fb_controller->setLogger(logger);\n\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n    model->bindToStream(stream);\n    cost->bindToStream(stream);\n    sampler->bindToStream(stream);\n    fb_controller->bindToStream(stream);\n    curandSetStream(gen, stream);\n\n    model->GPUSetup();\n    cost->GPUSetup();\n    sampler->GPUSetup();\n    fb_controller->GPUSetup();\n  }\n\n  void TearDown() override\n  {\n    delete model;\n    delete cost;\n    delete sampler;\n    delete fb_controller;\n    if (initial_x_d)\n    {\n      HANDLE_ERROR(cudaFree(initial_x_d));\n      initial_x_d = nullptr;\n    }\n    if (cost_trajectories_d)\n    {\n      HANDLE_ERROR(cudaFree(cost_trajectories_d));\n      cost_trajectories_d = nullptr;\n    }\n  }\n\n  DYN_T* model;\n  COST_T* cost;\n  SAMPLER_T* sampler;\n  FB_T* fb_controller;\n};\n\n// Declare the static variables\nconst int RMPPIKernels::nominal_idx;\nconst int RMPPIKernels::real_idx;\n\n/**\n * @brief Runs the combined init eval kernel and compares to one created on the CPU\n * to ensure they produce the same cost trajectories. 
Multiple runs of the init eval kernel\n * will be done with different parallelizations to ensure that the kernel calculates the same\n * regardless of the parallelization technique.\n */\nTEST_F(RMPPIKernels, ValidateCombinedInitEvalKernelAgainstCPU)\n{\n  /**\n   * Set up the problem\n   */\n  int num_candidates = 9;\n  int num_samples = 64;\n  int num_rollouts = num_candidates * num_samples;\n\n  /**\n   * Setup GPU\n   */\n  int* strides_d;\n  sampler->setNumRollouts(num_rollouts);\n  HANDLE_ERROR(cudaMalloc((void**)&strides_d, sizeof(int) * num_candidates));\n  HANDLE_ERROR(cudaMalloc((void**)&initial_x_d, sizeof(float) * num_candidates * DYN_T::STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&cost_trajectories_d, sizeof(float) * num_rollouts));\n\n  // Setup inputs on both CPU and GPU\n  Eigen::MatrixXf candidates = Eigen::MatrixXf::Random(DYN_T::STATE_DIM, num_candidates);\n  Eigen::MatrixXi strides = Eigen::MatrixXi::Zero(num_candidates, 1);\n  for (int i = 0; i < num_candidates; i++)\n  {\n    strides(i) = i + 1;\n  }\n  Eigen::MatrixXf trajectory_costs_cpu = Eigen::MatrixXf::Zero(num_rollouts, 1);\n  Eigen::MatrixXf trajectory_costs_gpu = Eigen::MatrixXf::Zero(num_rollouts, 1);\n\n  control_trajectory nominal_trajectory = control_trajectory::Random();\n  sampler->copyImportanceSamplerToDevice(nominal_trajectory.data(), 0, false);\n  sampler->generateSamples(1, 0, gen, false);\n  HANDLE_ERROR(cudaMemcpyAsync(initial_x_d, candidates.data(), sizeof(float) * DYN_T::STATE_DIM * num_candidates,\n                               cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(\n      cudaMemcpyAsync(strides_d, strides.data(), sizeof(int) * num_candidates, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  /**\n   * Do CPU calculation\n   */\n  launchCPUInitEvalKernel<DYN_T, COST_T, SAMPLER_T>(model, cost, sampler, dt, num_timesteps, num_candidates,\n                                                    num_samples, lambda, alpha, 
candidates, strides,\n                                                    trajectory_costs_cpu);\n\n  /**\n   * Do GPU Calculation on various thread dimensions\n   */\n  std::vector<int> possible_thread_x;\n  for (int size = num_samples; size > 0; size /= 2)\n  {\n    possible_thread_x.push_back(size);\n  }\n  std::vector<int> possible_thread_y{ 1, 2, 3, 8 };\n  for (const auto& thread_x : possible_thread_x)\n  {\n    for (const auto& thread_y : possible_thread_y)\n    {\n      dim3 threadsPerBlock(thread_x, thread_y, 1);\n      logger->info(\"Testing Combined Eval Kernel on (%d, %d, 1)\\n\", thread_x, thread_y);\n      mppi::kernels::rmppi::launchInitEvalKernel<DYN_T, COST_T, SAMPLER_T>(\n          model, cost, sampler, dt, num_timesteps, num_rollouts, lambda, alpha, num_samples, strides_d, initial_x_d,\n          cost_trajectories_d, threadsPerBlock, stream, false);\n      HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_gpu.data(), cost_trajectories_d, sizeof(float) * num_rollouts,\n                                   cudaMemcpyDeviceToHost, stream));\n      HANDLE_ERROR(cudaStreamSynchronize(stream));\n      for (int i = 0; i < num_rollouts; i++)\n      {\n        float cost_diff = trajectory_costs_cpu(i) - trajectory_costs_gpu(i);\n        std::string error_prefix = \"Eval sample \" + std::to_string(i) + \" (\" + std::to_string(threadsPerBlock.x) +\n                                   \", \" + std::to_string(threadsPerBlock.y) + \", \" + std::to_string(threadsPerBlock.z) +\n                                   \")\";\n        EXPECT_LT(fabsf(cost_diff), 1e-3 * trajectory_costs_cpu(i))\n            << error_prefix << \": CPU = \" << trajectory_costs_cpu(i) << \", GPU = \" << trajectory_costs_gpu(i)\n            << std::endl;\n      }\n    }\n  }\n  HANDLE_ERROR(cudaFree(strides_d));\n}\n\nTEST_F(RMPPIKernels, ValidateSplitInitEvalKernelAgainstCPU)\n{\n  /**\n   * Set up the problem\n   */\n  int num_candidates = 12;\n  int num_samples = 64;\n  int num_rollouts = 
num_candidates * num_samples;\n\n  /**\n   * Setup GPU\n   */\n  int* strides_d;\n  float* initial_x_d;\n  float* cost_trajectories_d;\n  float* output_d;\n  sampler->setNumRollouts(num_rollouts);\n  HANDLE_ERROR(cudaMalloc((void**)&strides_d, sizeof(int) * num_candidates));\n  HANDLE_ERROR(cudaMalloc((void**)&initial_x_d, sizeof(float) * num_candidates * DYN_T::STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&cost_trajectories_d, sizeof(float) * num_rollouts));\n  HANDLE_ERROR(cudaMalloc((void**)&output_d, sizeof(float) * num_rollouts * num_timesteps * DYN_T::OUTPUT_DIM));\n\n  // Setup inputs on both CPU and GPU\n  Eigen::MatrixXf candidates = Eigen::MatrixXf::Random(DYN_T::STATE_DIM, num_candidates) * 4;\n  Eigen::MatrixXi strides = Eigen::MatrixXi::Zero(num_candidates, 1);\n  for (int i = 0; i < num_candidates; i++)\n  {\n    strides(i) = i + 1;\n  }\n  Eigen::MatrixXf trajectory_costs_cpu = Eigen::MatrixXf::Zero(num_rollouts, 1);\n  Eigen::MatrixXf trajectory_costs_gpu = Eigen::MatrixXf::Zero(num_rollouts, 1);\n\n  control_trajectory nominal_trajectory = control_trajectory::Random();\n  sampler->copyImportanceSamplerToDevice(nominal_trajectory.data(), 0, false);\n  sampler->generateSamples(1, 0, gen, false);\n  HANDLE_ERROR(cudaMemcpyAsync(initial_x_d, candidates.data(), sizeof(float) * DYN_T::STATE_DIM * num_candidates,\n                               cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(\n      cudaMemcpyAsync(strides_d, strides.data(), sizeof(int) * num_candidates, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  /**\n   * Do CPU calculation\n   */\n  launchCPUInitEvalKernel<DYN_T, COST_T, SAMPLER_T>(model, cost, sampler, dt, num_timesteps, num_candidates,\n                                                    num_samples, lambda, alpha, candidates, strides,\n                                                    trajectory_costs_cpu);\n\n  /**\n   * Do GPU Calculation on various thread dimensions\n   */\n  
std::vector<int> possible_dyn_thread_x;\n  for (int size = num_samples; size > 0; size /= 2)\n  {\n    possible_dyn_thread_x.push_back(size);\n  }\n  std::vector<int> possible_cost_thread_x;\n  for (int size = num_timesteps; size > 0; size /= 2)\n  {\n    possible_cost_thread_x.push_back(size);\n  }\n  std::vector<int> possible_thread_y{ 1, 2, 3, 8 };\n  for (const auto& dyn_thread_x : possible_dyn_thread_x)\n  {\n    for (const auto& dyn_thread_y : possible_thread_y)\n    {\n      dim3 dynThreadsPerBlock(dyn_thread_x, dyn_thread_y, 1);\n      for (const auto& cost_thread_x : possible_cost_thread_x)\n      {\n        for (const auto& cost_thread_y : possible_thread_y)\n        {\n          dim3 costThreadsPerBlock(cost_thread_x, cost_thread_y, 1);\n          logger->info(\"Testing coalesced Split Eval Kernel with dyn(%d, %d, %d), cost(%d %d, %d)\\n\",\n                       dynThreadsPerBlock.x, dynThreadsPerBlock.y, dynThreadsPerBlock.z, costThreadsPerBlock.x,\n                       costThreadsPerBlock.y, costThreadsPerBlock.z);\n          mppi::kernels::rmppi::launchSplitInitEvalKernel<DYN_T, COST_T, SAMPLER_T, true>(\n              model, cost, sampler, dt, num_timesteps, num_rollouts, lambda, alpha, num_samples, strides_d, initial_x_d,\n              output_d, cost_trajectories_d, dynThreadsPerBlock, costThreadsPerBlock, stream, false);\n          HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_gpu.data(), cost_trajectories_d, sizeof(float) * num_rollouts,\n                                       cudaMemcpyDeviceToHost, stream));\n          HANDLE_ERROR(cudaStreamSynchronize(stream));\n          for (int i = 0; i < num_rollouts; i++)\n          {\n            float cost_diff = trajectory_costs_cpu(i) - trajectory_costs_gpu(i);\n            std::string error_prefix =\n                \"Coalesced eval sample \" + std::to_string(i) + \"/\" + std::to_string(num_rollouts) + \" dyn(\" +\n                std::to_string(dynThreadsPerBlock.x) + \", \" + 
std::to_string(dynThreadsPerBlock.y) + \", \" +\n                std::to_string(dynThreadsPerBlock.z) + \") cost(\" + std::to_string(costThreadsPerBlock.x) + \", \" +\n                std::to_string(costThreadsPerBlock.y) + \", \" + std::to_string(costThreadsPerBlock.z) + \")\";\n            ASSERT_LT(fabsf(cost_diff), 1e-3 * trajectory_costs_cpu(i))\n                << error_prefix << \": CPU = \" << trajectory_costs_cpu(i) << \", GPU = \" << trajectory_costs_gpu(i)\n                << std::endl;\n          }\n          logger->info(\"Testing non-coalesced Split Eval Kernel with dyn(%d, %d, %d), cost(%d %d, %d)\\n\",\n                       dynThreadsPerBlock.x, dynThreadsPerBlock.y, dynThreadsPerBlock.z, costThreadsPerBlock.x,\n                       costThreadsPerBlock.y, costThreadsPerBlock.z);\n          mppi::kernels::rmppi::launchSplitInitEvalKernel<DYN_T, COST_T, SAMPLER_T, false>(\n              model, cost, sampler, dt, num_timesteps, num_rollouts, lambda, alpha, num_samples, strides_d, initial_x_d,\n              output_d, cost_trajectories_d, dynThreadsPerBlock, costThreadsPerBlock, stream, false);\n          HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_gpu.data(), cost_trajectories_d, sizeof(float) * num_rollouts,\n                                       cudaMemcpyDeviceToHost, stream));\n          HANDLE_ERROR(cudaStreamSynchronize(stream));\n          for (int i = 0; i < num_rollouts; i++)\n          {\n            float cost_diff = trajectory_costs_cpu(i) - trajectory_costs_gpu(i);\n            std::string error_prefix =\n                \"Non-coalesced eval sample \" + std::to_string(i) + \"/\" + std::to_string(num_rollouts) + \" dyn(\" +\n                std::to_string(dynThreadsPerBlock.x) + \", \" + std::to_string(dynThreadsPerBlock.y) + \", \" +\n                std::to_string(dynThreadsPerBlock.z) + \") cost(\" + std::to_string(costThreadsPerBlock.x) + \", \" +\n                std::to_string(costThreadsPerBlock.y) + \", \" + 
std::to_string(costThreadsPerBlock.z) + \")\";\n            ASSERT_LT(fabsf(cost_diff), 1e-3 * trajectory_costs_cpu(i))\n                << error_prefix << \": CPU = \" << trajectory_costs_cpu(i) << \", GPU = \" << trajectory_costs_gpu(i)\n                << std::endl;\n          }\n        }\n      }\n    }\n  }\n  HANDLE_ERROR(cudaFree(strides_d));\n  HANDLE_ERROR(cudaFree(output_d));\n}\n\nTEST_F(RMPPIKernels, ValidateCombinedRMPPIRolloutKernelAgainstCPU)\n{\n  int num_rollouts = 2048;\n\n  sampler->setNumRollouts(num_rollouts);\n  sampler->setNumDistributions(2);\n\n  /**\n   * Setup GPU\n   */\n  HANDLE_ERROR(cudaMalloc((void**)&initial_x_d, sizeof(float) * 2 * DYN_T::STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&cost_trajectories_d, sizeof(float) * 2 * num_rollouts));\n\n  state_array initial_real_state = state_array::Random();\n  state_array initial_nominal_state = state_array::Random();\n\n  control_trajectory nominal_trajectory = control_trajectory::Random();\n  sampler->copyImportanceSamplerToDevice(nominal_trajectory.data(), nominal_idx, false);\n  sampler->copyImportanceSamplerToDevice(nominal_trajectory.data(), real_idx, false);\n  fb_controller->copyToDevice(false);\n  sampler->generateSamples(1, 0, gen, false);\n  HANDLE_ERROR(cudaMemcpyAsync(initial_x_d + nominal_idx * DYN_T::STATE_DIM, initial_nominal_state.data(),\n                               sizeof(float) * DYN_T::STATE_DIM, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(initial_x_d + real_idx * DYN_T::STATE_DIM, initial_real_state.data(),\n                               sizeof(float) * DYN_T::STATE_DIM, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  Eigen::MatrixXf trajectory_costs_cpu = Eigen::MatrixXf::Zero(2 * num_rollouts, 1);\n  Eigen::MatrixXf trajectory_costs_gpu = Eigen::MatrixXf::Zero(2 * num_rollouts, 1);\n\n  /**\n   * CPU Calculation\n   **/\n  launchCPURMPPIRolloutKernel<DYN_T, COST_T, SAMPLER_T, FB_T>(\n      model, 
cost, sampler, fb_controller, dt, num_timesteps, num_rollouts, lambda, alpha, value_func_threshold,\n      nominal_idx, real_idx, initial_real_state, initial_nominal_state, trajectory_costs_cpu);\n  /**\n   * GPU Calculation\n   **/\n  std::vector<int> possible_thread_x;\n  for (int size = 64; size > 0; size /= 2)\n  {\n    possible_thread_x.push_back(size);\n  }\n  std::vector<int> possible_thread_y{ 1, 2, 3 };\n  for (const auto& thread_x : possible_thread_x)\n  {\n    for (const auto& thread_y : possible_thread_y)\n    {\n      dim3 threadsPerBlock(thread_x, thread_y, 2);\n      logger->info(\"Testing RMPPI Rollout Kernel with (%d, %d, %d)\\n\", threadsPerBlock.x, threadsPerBlock.y,\n                   threadsPerBlock.z);\n      mppi::kernels::rmppi::launchRMPPIRolloutKernel<DYN_T, COST_T, SAMPLER_T, FB_T::TEMPLATED_GPU_FEEDBACK,\n                                                     nominal_idx>(\n          model, cost, sampler, fb_controller->getHostPointer().get(), dt, num_timesteps, num_rollouts, lambda, alpha,\n          value_func_threshold, initial_x_d, cost_trajectories_d, threadsPerBlock, stream, false);\n      HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_gpu.data(), cost_trajectories_d, sizeof(float) * 2 * num_rollouts,\n                                   cudaMemcpyDeviceToHost, stream));\n      HANDLE_ERROR(cudaStreamSynchronize(stream));\n      for (int i = 0; i < 2 * num_rollouts; i++)\n      {\n        float cost_diff = trajectory_costs_cpu(i) - trajectory_costs_gpu(i);\n        std::string error_prefix = \"Rollout sample \" + std::to_string(i) + \" (\" + std::to_string(threadsPerBlock.x) +\n                                   \", \" + std::to_string(threadsPerBlock.y) + \", \" + std::to_string(threadsPerBlock.z) +\n                                   \")\";\n        EXPECT_LT(fabsf(cost_diff), 1e-3 * trajectory_costs_cpu(i))\n            << error_prefix << \": CPU = \" << trajectory_costs_cpu(i) << \", GPU = \" << trajectory_costs_gpu(i)\n           
 << std::endl;\n      }\n    }\n  }\n}\n\nTEST_F(RMPPIKernels, ValidateSplitRMPPIRolloutKernelAgainstCPU)\n{\n  int num_rollouts = 2048;\n\n  sampler->setNumRollouts(num_rollouts);\n  sampler->setNumDistributions(2);\n\n  /**\n   * Setup GPU\n   */\n  float* output_d = nullptr;\n  HANDLE_ERROR(cudaMalloc((void**)&initial_x_d, sizeof(float) * 2 * DYN_T::STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&cost_trajectories_d, sizeof(float) * 2 * num_rollouts));\n  HANDLE_ERROR(cudaMalloc((void**)&output_d, sizeof(float) * 2 * num_rollouts * num_timesteps * DYN_T::OUTPUT_DIM));\n\n  state_array initial_real_state = state_array::Random();\n  state_array initial_nominal_state = state_array::Random();\n\n  control_trajectory nominal_trajectory = control_trajectory::Random();\n  sampler->copyImportanceSamplerToDevice(nominal_trajectory.data(), nominal_idx, false);\n  sampler->copyImportanceSamplerToDevice(nominal_trajectory.data(), real_idx, false);\n  fb_controller->copyToDevice(false);\n  sampler->generateSamples(1, 0, gen, false);\n  HANDLE_ERROR(cudaMemcpyAsync(initial_x_d + nominal_idx * DYN_T::STATE_DIM, initial_nominal_state.data(),\n                               sizeof(float) * DYN_T::STATE_DIM, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(initial_x_d + real_idx * DYN_T::STATE_DIM, initial_real_state.data(),\n                               sizeof(float) * DYN_T::STATE_DIM, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  Eigen::MatrixXf trajectory_costs_cpu = Eigen::MatrixXf::Zero(2 * num_rollouts, 1);\n  Eigen::MatrixXf trajectory_costs_gpu = Eigen::MatrixXf::Zero(2 * num_rollouts, 1);\n\n  /**\n   * CPU Calculation\n   **/\n  launchCPURMPPIRolloutKernel<DYN_T, COST_T, SAMPLER_T, FB_T>(\n      model, cost, sampler, fb_controller, dt, num_timesteps, num_rollouts, lambda, alpha, value_func_threshold,\n      nominal_idx, real_idx, initial_real_state, initial_nominal_state, trajectory_costs_cpu);\n  /**\n   * 
GPU Calculation\n   **/\n  std::vector<int> possible_dyn_thread_x;\n  for (int size = 64; size > 0; size /= 2)\n  {\n    possible_dyn_thread_x.push_back(size);\n  }\n  std::vector<int> possible_cost_thread_x;\n  for (int size = num_timesteps; size > 0; size /= 2)\n  {\n    possible_cost_thread_x.push_back(size);\n  }\n  std::vector<int> possible_thread_y{ 1, 2, 3 };\n  for (const auto& dyn_thread_x : possible_dyn_thread_x)\n  {\n    for (const auto& dyn_thread_y : possible_thread_y)\n    {\n      dim3 dynThreadsPerBlock(dyn_thread_x, dyn_thread_y, 2);\n      for (const auto& cost_thread_x : possible_cost_thread_x)\n      {\n        for (const auto& cost_thread_y : possible_thread_y)\n        {\n          dim3 costThreadsPerBlock(cost_thread_x, cost_thread_y, 2);\n          logger->info(\"Testing coalesced RMPPI Rollout Kernel with dyn(%d, %d, %d), cost(%d %d, %d)\\n\",\n                       dynThreadsPerBlock.x, dynThreadsPerBlock.y, dynThreadsPerBlock.z, costThreadsPerBlock.x,\n                       costThreadsPerBlock.y, costThreadsPerBlock.z);\n          mppi::kernels::rmppi::launchSplitRMPPIRolloutKernel<DYN_T, COST_T, SAMPLER_T, FB_T::TEMPLATED_GPU_FEEDBACK,\n                                                              nominal_idx, true>(\n              model, cost, sampler, fb_controller->getHostPointer().get(), dt, num_timesteps, num_rollouts, lambda,\n              alpha, value_func_threshold, initial_x_d, output_d, cost_trajectories_d, dynThreadsPerBlock,\n              costThreadsPerBlock, stream, false);\n          HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_gpu.data(), cost_trajectories_d,\n                                       sizeof(float) * 2 * num_rollouts, cudaMemcpyDeviceToHost, stream));\n          HANDLE_ERROR(cudaStreamSynchronize(stream));\n          for (int i = 0; i < 2 * num_rollouts; i++)\n          {\n            float cost_diff = trajectory_costs_cpu(i) - trajectory_costs_gpu(i);\n            std::string error_prefix =\n           
     \"Coalesced rollout sample \" + std::to_string(i) + \"/\" + std::to_string(num_rollouts) + \" dyn(\" +\n                std::to_string(dynThreadsPerBlock.x) + \", \" + std::to_string(dynThreadsPerBlock.y) + \", \" +\n                std::to_string(dynThreadsPerBlock.z) + \") cost(\" + std::to_string(costThreadsPerBlock.x) + \", \" +\n                std::to_string(costThreadsPerBlock.y) + \", \" + std::to_string(costThreadsPerBlock.z) + \")\";\n            ASSERT_LT(fabsf(cost_diff), 1e-3 * trajectory_costs_cpu(i))\n                << error_prefix << \": CPU = \" << trajectory_costs_cpu(i) << \", GPU = \" << trajectory_costs_gpu(i)\n                << std::endl;\n          }\n          logger->info(\"Testing non-coalesced RMPPI Rollout Kernel with dyn(%d, %d, %d), cost(%d %d, %d)\\n\",\n                       dynThreadsPerBlock.x, dynThreadsPerBlock.y, dynThreadsPerBlock.z, costThreadsPerBlock.x,\n                       costThreadsPerBlock.y, costThreadsPerBlock.z);\n          mppi::kernels::rmppi::launchSplitRMPPIRolloutKernel<DYN_T, COST_T, SAMPLER_T, FB_T::TEMPLATED_GPU_FEEDBACK,\n                                                              nominal_idx, false>(\n              model, cost, sampler, fb_controller->getHostPointer().get(), dt, num_timesteps, num_rollouts, lambda,\n              alpha, value_func_threshold, initial_x_d, output_d, cost_trajectories_d, dynThreadsPerBlock,\n              costThreadsPerBlock, stream, false);\n          HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_gpu.data(), cost_trajectories_d,\n                                       sizeof(float) * 2 * num_rollouts, cudaMemcpyDeviceToHost, stream));\n          HANDLE_ERROR(cudaStreamSynchronize(stream));\n          eigen_assert_float_near<Eigen::MatrixXf>(trajectory_costs_cpu, trajectory_costs_gpu, 1e-3);\n          for (int i = 0; i < 2 * num_rollouts; i++)\n          {\n            float cost_diff = trajectory_costs_cpu(i) - trajectory_costs_gpu(i);\n            std::string 
error_prefix =\n                \"Non-coalesced rollout sample \" + std::to_string(i) + \"/\" + std::to_string(num_rollouts) + \" dyn(\" +\n                std::to_string(dynThreadsPerBlock.x) + \", \" + std::to_string(dynThreadsPerBlock.y) + \", \" +\n                std::to_string(dynThreadsPerBlock.z) + \") cost(\" + std::to_string(costThreadsPerBlock.x) + \", \" +\n                std::to_string(costThreadsPerBlock.y) + \", \" + std::to_string(costThreadsPerBlock.z) + \")\";\n            ASSERT_LT(fabsf(cost_diff), 1e-3 * trajectory_costs_cpu(i))\n                << error_prefix << \": CPU = \" << trajectory_costs_cpu(i) << \", GPU = \" << trajectory_costs_gpu(i)\n                << std::endl;\n          }\n        }\n      }\n    }\n  }\n  HANDLE_ERROR(cudaFree(output_d));\n}\n\nTEST_F(RMPPIKernels, ValidateCombinedRMPPIRolloutKernelAgainstMPPIRollout)\n{\n  int num_rollouts = 2048;\n\n  sampler->setNumRollouts(num_rollouts);\n  sampler->setNumDistributions(2);\n\n  // Used to reset control samples between calls to rollout kernels\n  Eigen::MatrixXf control_noise = Eigen::MatrixXf::Zero(DYN_T::CONTROL_DIM, 2 * num_rollouts * num_timesteps);\n\n  /**\n   * Setup GPU\n   */\n  HANDLE_ERROR(cudaMalloc((void**)&initial_x_d, sizeof(float) * 2 * DYN_T::STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&cost_trajectories_d, sizeof(float) * 2 * num_rollouts));\n\n  state_array initial_real_state = state_array::Random();\n\n  control_trajectory nominal_trajectory = control_trajectory::Random();\n  sampler->copyImportanceSamplerToDevice(nominal_trajectory.data(), nominal_idx, false);\n  sampler->copyImportanceSamplerToDevice(nominal_trajectory.data(), real_idx, false);\n  fb_controller->copyToDevice(false);\n  sampler->generateSamples(1, 0, gen, false);\n\n  // Get unaltered control noise for resetting purposes\n  HANDLE_ERROR(cudaMemcpyAsync(control_noise.data(), sampler->getControlSample(0, 0, 0),\n                               sizeof(float) * 2 * num_rollouts * 
num_timesteps * DYN_T::CONTROL_DIM,\n                               cudaMemcpyDeviceToHost, stream));\n  // Start both nominal and real states at the same point\n  HANDLE_ERROR(cudaMemcpyAsync(initial_x_d + nominal_idx * DYN_T::STATE_DIM, initial_real_state.data(),\n                               sizeof(float) * DYN_T::STATE_DIM, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaMemcpyAsync(initial_x_d + real_idx * DYN_T::STATE_DIM, initial_real_state.data(),\n                               sizeof(float) * DYN_T::STATE_DIM, cudaMemcpyHostToDevice, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  Eigen::MatrixXf trajectory_costs_rmppi = Eigen::MatrixXf::Zero(2 * num_rollouts, 1);\n  Eigen::MatrixXf trajectory_costs_mppi = Eigen::MatrixXf::Zero(2 * num_rollouts, 1);\n\n  /**\n   * GPU Calculation\n   **/\n  std::vector<int> possible_thread_x;\n  for (int size = 64; size > 0; size /= 2)\n  {\n    possible_thread_x.push_back(size);\n  }\n  std::vector<int> possible_thread_y{ 1, 2, 3 };\n  for (const auto& thread_x : possible_thread_x)\n  {\n    for (const auto& thread_y : possible_thread_y)\n    {\n      dim3 threadsPerBlock(thread_x, thread_y, 2);\n      mppi::kernels::rmppi::launchRMPPIRolloutKernel<DYN_T, COST_T, SAMPLER_T, FB_T::TEMPLATED_GPU_FEEDBACK,\n                                                     nominal_idx>(\n          model, cost, sampler, fb_controller->getHostPointer().get(), dt, num_timesteps, num_rollouts, lambda, alpha,\n          value_func_threshold, initial_x_d, cost_trajectories_d, threadsPerBlock, stream, false);\n      HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_rmppi.data(), cost_trajectories_d, sizeof(float) * 2 * num_rollouts,\n                                   cudaMemcpyDeviceToHost, stream));\n      HANDLE_ERROR(cudaStreamSynchronize(stream));\n      // Reset control samples\n      HANDLE_ERROR(cudaMemcpyAsync(sampler->getControlSample(0, 0, 0), control_noise.data(),\n                                   sizeof(float) 
* 2 * num_rollouts * num_timesteps * DYN_T::CONTROL_DIM,\n                                   cudaMemcpyHostToDevice, stream));\n      mppi::kernels::launchRolloutKernel<DYN_T, COST_T, SAMPLER_T>(model, cost, sampler, dt, num_timesteps,\n                                                                   num_rollouts, lambda, alpha, initial_x_d,\n                                                                   cost_trajectories_d, threadsPerBlock, stream, false);\n      // Reset control samples\n      HANDLE_ERROR(cudaMemcpyAsync(sampler->getControlSample(0, 0, 0), control_noise.data(),\n                                   sizeof(float) * 2 * num_rollouts * num_timesteps * DYN_T::CONTROL_DIM,\n                                   cudaMemcpyHostToDevice, stream));\n      HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_mppi.data(), cost_trajectories_d, sizeof(float) * 2 * num_rollouts,\n                                   cudaMemcpyDeviceToHost, stream));\n      HANDLE_ERROR(cudaStreamSynchronize(stream));\n      for (int i = 0; i < 2 * num_rollouts; i++)\n      {\n        float cost_diff = trajectory_costs_mppi(i) - trajectory_costs_rmppi(i);\n        std::string error_prefix = \"Rollout sample \" + std::to_string(i) + \" (\" + std::to_string(threadsPerBlock.x) +\n                                   \", \" + std::to_string(threadsPerBlock.y) + \", \" + std::to_string(threadsPerBlock.z) +\n                                   \")\";\n        EXPECT_LT(fabsf(cost_diff), 1e-3 * trajectory_costs_mppi(i))\n            << error_prefix << \": MPPI = \" << trajectory_costs_mppi(i) << \", R-MPPI = \" << trajectory_costs_rmppi(i)\n            << std::endl;\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "tests/mppi_core/rollout_kernel_tests.cu",
    "content": "#include <gtest/gtest.h>\n#include <kernel_tests/core/rollout_kernel_test.cuh>\n#include <mppi/cost_functions/autorally/ar_standard_cost.cuh>\n#include <mppi/cost_functions/cartpole/cartpole_quadratic_cost.cuh>\n#include <mppi/dynamics/autorally/ar_nn_model.cuh>\n#include <mppi/dynamics/cartpole/cartpole_dynamics.cuh>\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n#include <mppi/utils/test_helper.h>\n\n#include <autorally_test_map.h>\n#include <autorally_test_network.h>\n#include <random>\n\n#include <autorally_test_network.h>\n#include <autorally_test_map.h>\n/*\n * Here we will test various device functions that are related to cuda kernel things.\n */\n\nTEST(RolloutKernel, loadGlobalToShared)\n{\n  const int STATE_DIM = 12;\n  const int CONTROL_DIM = 3;\n  std::vector<float> x0_host = { 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2 };\n\n  std::vector<float> x_thread_host(STATE_DIM, 0.f);\n  std::vector<float> xdot_thread_host(STATE_DIM, 2.f);\n  std::vector<float> u_thread_host(CONTROL_DIM, 3.f);\n\n  launchGlobalToShared_KernelTest(x0_host, x_thread_host, xdot_thread_host, u_thread_host);\n\n  array_assert_float_eq(x0_host, x_thread_host, STATE_DIM);\n  array_assert_float_eq(0.f, xdot_thread_host, STATE_DIM);\n  array_assert_float_eq(0.f, u_thread_host, CONTROL_DIM);\n}\n\nTEST(RolloutKernel, loadGlobalToSharedNominalAndActualState)\n{\n  const int STATE_DIM = 12;\n  const int CONTROL_DIM = 3;\n  std::vector<float> x0_host_act = { 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2 };\n\n  std::vector<float> x0_host_nom = { 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2.0, 2.1, 2.2 };\n\n  std::vector<float> x_thread_host_act(STATE_DIM, 0.f);\n  std::vector<float> x_thread_host_nom(STATE_DIM, 0.f);\n  std::vector<float> xdot_thread_host_act(STATE_DIM, 2.f);\n  std::vector<float> xdot_thread_host_nom(STATE_DIM, 2.f);\n\n  std::vector<float> u_thread_host_act(CONTROL_DIM, 3.f);\n  std::vector<float> 
u_thread_host_nom(CONTROL_DIM, 3.f);\n\n  launchGlobalToShared_KernelTest_nom_act(x0_host_act, x_thread_host_act, xdot_thread_host_act, u_thread_host_act,\n                                          x0_host_nom, x_thread_host_nom, xdot_thread_host_nom, u_thread_host_nom);\n\n  // std::cout << \"Testing actual x0\" << std::endl;\n  array_assert_float_eq(x0_host_act, x_thread_host_act, STATE_DIM);\n  // std::cout << \"Testing nom x0\" << std::endl;\n  array_assert_float_eq(x0_host_nom, x_thread_host_nom, STATE_DIM);\n  // std::cout << \"Testing empty\" << std::endl;\n  array_assert_float_eq(0.f, xdot_thread_host_act, STATE_DIM);\n  array_assert_float_eq(0.f, xdot_thread_host_nom, STATE_DIM);\n  array_assert_float_eq(0.f, u_thread_host_act, CONTROL_DIM);\n  array_assert_float_eq(0.f, u_thread_host_nom, CONTROL_DIM);\n}\n\nTEST(RolloutKernel, computeAndSaveCostAllRollouts)\n{\n  // Define an assortment of costs for a given number of rollouts\n  CartpoleQuadraticCost cost;\n  cost.GPUSetup();\n\n  const int num_rollouts = 1234;\n  std::array<float, num_rollouts> cost_all_rollouts = { 0 };\n  std::array<float, CartpoleDynamics::STATE_DIM* num_rollouts> x_traj_terminal = { 0 };\n  std::array<float, num_rollouts> cost_known = { 0 };\n  std::array<float, num_rollouts> cost_compute = { 0 };\n\n  std::default_random_engine generator(7.0);\n  std::normal_distribution<float> distribution(1.0, 2.0);\n\n  for (auto& costs : cost_all_rollouts)\n  {\n    costs = 10 * distribution(generator);\n  }\n\n  for (auto& state : x_traj_terminal)\n  {\n    state = distribution(generator);\n  }\n  // Compute terminal cost on CPU\n  for (int i = 0; i < num_rollouts; ++i)\n  {\n    cost_known[i] =\n        cost_all_rollouts[i] +\n        (x_traj_terminal[CartpoleDynamics::STATE_DIM * i] * x_traj_terminal[CartpoleDynamics::STATE_DIM * i] *\n             cost.getParams().cart_position_coeff +\n         x_traj_terminal[CartpoleDynamics::STATE_DIM * i + 1] * 
x_traj_terminal[CartpoleDynamics::STATE_DIM * i + 1] *\n             cost.getParams().cart_velocity_coeff +\n         x_traj_terminal[CartpoleDynamics::STATE_DIM * i + 2] * x_traj_terminal[CartpoleDynamics::STATE_DIM * i + 2] *\n             cost.getParams().pole_angle_coeff +\n         x_traj_terminal[CartpoleDynamics::STATE_DIM * i + 3] * x_traj_terminal[CartpoleDynamics::STATE_DIM * i + 3] *\n             cost.getParams().pole_angular_velocity_coeff) *\n            cost.getParams().terminal_cost_coeff;\n  }\n\n  // Compute the dynamics on the GPU\n  launchComputeAndSaveCostAllRollouts_KernelTest<CartpoleQuadraticCost, CartpoleDynamics::STATE_DIM, num_rollouts>(\n      cost, cost_all_rollouts, x_traj_terminal, cost_compute);\n\n  array_assert_float_eq<num_rollouts>(cost_known, cost_compute);\n}\n\nclass RolloutKernelTests : public ::testing::Test\n{\npublic:\n  using DYN_T = CartpoleDynamics;\n  using COST_T = CartpoleQuadraticCost;\n  using DYN_PARAMS_T = typename DYN_T::DYN_PARAMS_T;\n  using COST_PARAMS_T = typename COST_T::COST_PARAMS_T;\n  using SAMPLER_T = mppi::sampling_distributions::GaussianDistribution<DYN_PARAMS_T>;\n  using SAMPLER_PARAMS_T = typename SAMPLER_T::SAMPLING_PARAMS_T;\n  using state_array = DYN_T::state_array;\n\n  float dt = 0.01;\n  float lambda = 0.5f;\n  float alpha = 0.001f;\n  float control_std_dev = 0.4f;\n  int num_timesteps = 100;\n  int num_rollouts = 2048;\n\n  cudaStream_t stream;\n  DYN_T* model;\n  COST_T* cost;\n  SAMPLER_T* sampler;\n  mppi::util::MPPILoggerPtr logger;\n\n  void SetUp() override\n  {\n    model = new DYN_T();\n    cost = new COST_T();\n    sampler = new SAMPLER_T();\n    logger = std::make_shared<mppi::util::MPPILogger>();\n    model->setLogger(logger);\n    cost->setLogger(logger);\n    sampler->setLogger(logger);\n\n    SAMPLER_PARAMS_T sampler_params;\n    for (int i = 0; i < DYN_T::CONTROL_DIM; i++)\n    {\n      sampler_params.std_dev[i] = control_std_dev;\n    }\n    sampler_params.num_rollouts = 
num_rollouts;\n    sampler_params.num_timesteps = num_timesteps;\n    sampler->setParams(sampler_params);\n\n    COST_PARAMS_T cost_params;\n    cost_params.cart_position_coeff = 100;\n    cost_params.pole_angle_coeff = 200;\n    cost_params.cart_velocity_coeff = 10;\n    cost_params.pole_angular_velocity_coeff = 20;\n    cost_params.control_cost_coeff[0] = 1;\n    cost_params.terminal_cost_coeff = 0;\n    cost_params.desired_terminal_state[0] = -20;\n    cost_params.desired_terminal_state[1] = 0;\n    cost_params.desired_terminal_state[2] = M_PI;\n    cost_params.desired_terminal_state[3] = 0;\n    cost->setParams(cost_params);\n\n    HANDLE_ERROR(cudaStreamCreate(&stream));\n  }\n\n  void TearDown() override\n  {\n    delete model;\n    delete cost;\n    delete sampler;\n  }\n};\n\nTEST_F(RolloutKernelTests, runRolloutKernelOnMultipleSystems)\n{\n  std::vector<float> x0(DYN_T::STATE_DIM);\n  Eigen::MatrixXf nom_control = Eigen::MatrixXf::Random(DYN_T::CONTROL_DIM, num_timesteps);\n  std::vector<float> nominal_control_seq(nom_control.data(), nom_control.data() + nom_control.size());\n  std::vector<float> trajectory_costs_act(num_rollouts);\n  std::vector<float> trajectory_costs_nom(num_rollouts);\n\n  // set initial state\n  for (size_t i = 0; i < x0.size(); i++)\n  {\n    x0[i] = i * 0.1 + 0.2;\n  }\n  launchRolloutKernel_nom_act<DYN_T, COST_T, SAMPLER_T>(model, cost, sampler, dt, num_timesteps, num_rollouts, lambda,\n                                                        alpha, x0, nominal_control_seq, trajectory_costs_act,\n                                                        trajectory_costs_nom, stream);\n  array_assert_float_eq(trajectory_costs_act, trajectory_costs_nom, num_rollouts);\n}\n\nTEST_F(RolloutKernelTests, CombinedRolloutKernelGPUvsCPU)\n{\n  state_array x0 = state_array::Random();\n\n  /**\n   * GPU Setup\n   **/\n  model->GPUSetup();\n  cost->GPUSetup();\n  sampler->GPUSetup();\n\n  Eigen::MatrixXf control_seq = 
Eigen::MatrixXf::Random(DYN_T::CONTROL_DIM, num_timesteps * num_rollouts);\n  sampler->copyImportanceSamplerToDevice(control_seq.data(), 0, false);\n\n  // Generate samples and do stream synchronize\n  logger->debug(\"Generating samples\\n\");\n  curandGenerator_t gen;\n  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);\n  curandSetStream(gen, stream);\n  sampler->generateSamples(1, 0, gen, true);\n\n  Eigen::MatrixXf trajectory_costs_cpu = Eigen::MatrixXf::Zero(num_rollouts, 1);\n  Eigen::MatrixXf trajectory_costs_gpu = Eigen::MatrixXf::Zero(num_rollouts, 1);\n\n  logger->debug(\"Running CPU Rollout\\n\");\n  launchCPURolloutKernel<DYN_T, COST_T, SAMPLER_T>(model, cost, sampler, dt, num_timesteps, num_rollouts, lambda, alpha,\n                                                   x0, trajectory_costs_cpu, stream);\n\n  /**\n    GPU Computations\n    **/\n  float* initial_x_d;\n  float* trajectory_costs_d;\n  HANDLE_ERROR(cudaMalloc((void**)&initial_x_d, sizeof(float) * DYN_T::STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&trajectory_costs_d, sizeof(float) * num_rollouts));\n  HANDLE_ERROR(\n      cudaMemcpyAsync(initial_x_d, x0.data(), sizeof(float) * DYN_T::STATE_DIM, cudaMemcpyHostToDevice, stream));\n  std::vector<int> possible_thread_x;\n  for (int i = 64; i > 0; i /= 2)\n  {\n    possible_thread_x.push_back(i);\n  }\n  std::vector<int> possible_thread_y = { 1, 2, 3, 4 };\n\n  for (const auto& thread_x : possible_thread_x)\n  {\n    for (const auto& thread_y : possible_thread_y)\n    {\n      dim3 threadsPerBlock(thread_x, thread_y, 1);\n      logger->debug(\"Running GPU Combined Rollout on (%d, %d, %d)\\n\", threadsPerBlock.x, threadsPerBlock.y,\n                    threadsPerBlock.z);\n      mppi::kernels::launchRolloutKernel<DYN_T, COST_T, SAMPLER_T>(model, cost, sampler, dt, num_timesteps,\n                                                                   num_rollouts, lambda, alpha, initial_x_d,\n                                                     
              trajectory_costs_d, threadsPerBlock, stream, false);\n      HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_gpu.data(), trajectory_costs_d, sizeof(float) * num_rollouts,\n                                   cudaMemcpyDeviceToHost, stream));\n      HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n      eigen_assert_float_near<Eigen::MatrixXf>(trajectory_costs_cpu, trajectory_costs_gpu, 1e-4f);\n    }\n  }\n}\n\nTEST_F(RolloutKernelTests, SplitRolloutKernelGPUvsCPU)\n{\n  state_array x0 = state_array::Random();\n\n  /**\n   * GPU Setup\n   **/\n  model->GPUSetup();\n  cost->GPUSetup();\n  sampler->GPUSetup();\n\n  Eigen::MatrixXf control_seq = Eigen::MatrixXf::Random(DYN_T::CONTROL_DIM, num_timesteps * num_rollouts);\n  sampler->copyImportanceSamplerToDevice(control_seq.data(), 0, false);\n\n  // Generate samples and do stream synchronize\n  logger->debug(\"Generating samples\\n\");\n  curandGenerator_t gen;\n  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);\n  curandSetStream(gen, stream);\n  sampler->generateSamples(1, 0, gen, true);\n\n  Eigen::MatrixXf trajectory_costs_cpu = Eigen::MatrixXf::Zero(num_rollouts, 1);\n  Eigen::MatrixXf trajectory_costs_gpu = Eigen::MatrixXf::Zero(num_rollouts, 1);\n\n  logger->debug(\"Running CPU Rollout\\n\");\n  launchCPURolloutKernel<DYN_T, COST_T, SAMPLER_T>(model, cost, sampler, dt, num_timesteps, num_rollouts, lambda, alpha,\n                                                   x0, trajectory_costs_cpu, stream);\n\n  /**\n    GPU Computations\n    **/\n  float* initial_x_d;\n  float* output_d;\n  float* trajectory_costs_d;\n  HANDLE_ERROR(cudaMalloc((void**)&initial_x_d, sizeof(float) * DYN_T::STATE_DIM));\n  HANDLE_ERROR(cudaMalloc((void**)&output_d, sizeof(float) * DYN_T::OUTPUT_DIM * num_rollouts * num_timesteps));\n  HANDLE_ERROR(cudaMalloc((void**)&trajectory_costs_d, sizeof(float) * num_rollouts));\n  HANDLE_ERROR(\n      cudaMemcpyAsync(initial_x_d, x0.data(), sizeof(float) * DYN_T::STATE_DIM, 
cudaMemcpyHostToDevice, stream));\n  std::vector<int> possible_dyn_thread_x;\n  for (int i = 128; i > 0; i /= 2)\n  {\n    possible_dyn_thread_x.push_back(i);\n  }\n  std::vector<int> possible_dyn_thread_y = { 1, 2, 3, 4 };\n\n  std::vector<int> possible_cost_thread_x;\n  for (int i = num_timesteps; i > 0; i /= 2)\n  {\n    possible_cost_thread_x.push_back(i);\n  }\n  std::vector<int> possible_cost_thread_y = { 1, 2, 3, 4 };\n\n  for (const auto& dyn_thread_x : possible_dyn_thread_x)\n  {\n    for (const auto& dyn_thread_y : possible_dyn_thread_y)\n    {\n      for (const auto& cost_thread_x : possible_cost_thread_x)\n      {\n        for (const auto& cost_thread_y : possible_cost_thread_y)\n        {\n          dim3 dynThreadsPerBlock(dyn_thread_x, dyn_thread_y, 1);\n          dim3 costThreadsPerBlock(cost_thread_x, cost_thread_y, 1);\n          logger->debug(\"Running coalesced GPU Split Rollout with dyn(%d, %d, %d), cost(%d, %d, %d)\\n\",\n                        dynThreadsPerBlock.x, dynThreadsPerBlock.y, dynThreadsPerBlock.z, costThreadsPerBlock.x,\n                        costThreadsPerBlock.y, costThreadsPerBlock.z);\n          mppi::kernels::launchSplitRolloutKernel<DYN_T, COST_T, SAMPLER_T, true>(\n              model, cost, sampler, dt, num_timesteps, num_rollouts, lambda, alpha, initial_x_d, output_d,\n              trajectory_costs_d, dynThreadsPerBlock, costThreadsPerBlock, stream, false);\n          HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_gpu.data(), trajectory_costs_d, sizeof(float) * num_rollouts,\n                                       cudaMemcpyDeviceToHost, stream));\n          HANDLE_ERROR(cudaStreamSynchronize(stream));\n          // eigen_assert_float_near<Eigen::MatrixXf>(trajectory_costs_cpu, trajectory_costs_gpu, 1e-4f);\n          for (int i = 0; i < num_rollouts; i++)\n          {\n            float cost_diff = trajectory_costs_cpu(i) - trajectory_costs_gpu(i);\n            std::string error_prefix =\n                \"Split Rollout 
sample \" + std::to_string(i) + \" dyn(\" + std::to_string(dynThreadsPerBlock.x) + \", \" +\n                std::to_string(dynThreadsPerBlock.y) + \", \" + std::to_string(dynThreadsPerBlock.z) + +\" cost(\" +\n                std::to_string(costThreadsPerBlock.x) + \", \" + std::to_string(costThreadsPerBlock.y) + \", \" +\n                std::to_string(costThreadsPerBlock.z) + \")\";\n            ASSERT_LT(fabsf(cost_diff), 1e-3 * trajectory_costs_cpu(i))\n                << error_prefix << \": CPU = \" << trajectory_costs_cpu(i) << \", GPU = \" << trajectory_costs_gpu(i)\n                << std::endl;\n          }\n\n          logger->debug(\"Running non-coalesced GPU Split Rollout with dyn(%d, %d, %d), cost(%d, %d, %d)\\n\",\n                        dynThreadsPerBlock.x, dynThreadsPerBlock.y, dynThreadsPerBlock.z, costThreadsPerBlock.x,\n                        costThreadsPerBlock.y, costThreadsPerBlock.z);\n          mppi::kernels::launchSplitRolloutKernel<DYN_T, COST_T, SAMPLER_T, false>(\n              model, cost, sampler, dt, num_timesteps, num_rollouts, lambda, alpha, initial_x_d, output_d,\n              trajectory_costs_d, dynThreadsPerBlock, costThreadsPerBlock, stream, false);\n          HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs_gpu.data(), trajectory_costs_d, sizeof(float) * num_rollouts,\n                                       cudaMemcpyDeviceToHost, stream));\n          HANDLE_ERROR(cudaStreamSynchronize(stream));\n          for (int i = 0; i < num_rollouts; i++)\n          {\n            float cost_diff = trajectory_costs_cpu(i) - trajectory_costs_gpu(i);\n            std::string error_prefix =\n                \"Split Rollout sample \" + std::to_string(i) + \" dyn(\" + std::to_string(dynThreadsPerBlock.x) + \", \" +\n                std::to_string(dynThreadsPerBlock.y) + \", \" + std::to_string(dynThreadsPerBlock.z) + +\" cost(\" +\n                std::to_string(costThreadsPerBlock.x) + \", \" + std::to_string(costThreadsPerBlock.y) + \", \" 
+\n                std::to_string(costThreadsPerBlock.z) + \")\";\n            ASSERT_LT(fabsf(cost_diff), 1e-3 * trajectory_costs_cpu(i))\n                << error_prefix << \": CPU = \" << trajectory_costs_cpu(i) << \", GPU = \" << trajectory_costs_gpu(i)\n                << std::endl;\n          }\n          // eigen_assert_float_near<Eigen::MatrixXf>(trajectory_costs_cpu, trajectory_costs_gpu, 1e-4f);\n        }\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "tests/mppi_core/viz_kernels_test.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/dynamics/cartpole/cartpole_dynamics.cuh>\n#include <mppi/cost_functions/cartpole/cartpole_quadratic_cost.cuh>\n#include <mppi/feedback_controllers/DDP/ddp.cuh>\n#include <mppi/core/mppi_common.cuh>\n\n#include <mppi/utils/test_helper.h>\n#include <random>\n\nclass VizualizationKernelsTest : public ::testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    CartpoleQuadraticCostParams new_params;\n    new_params.cart_position_coeff = 100;\n    new_params.pole_angle_coeff = 200;\n    new_params.cart_velocity_coeff = 10;\n    new_params.pole_angular_velocity_coeff = 20;\n    new_params.control_cost_coeff[0] = 1;\n    new_params.terminal_cost_coeff = 0;\n    new_params.desired_terminal_state[0] = -20;\n    new_params.desired_terminal_state[1] = 0;\n    new_params.desired_terminal_state[2] = M_PI;\n    new_params.desired_terminal_state[3] = 0;\n    cost.setParams(new_params);\n\n    cudaStreamCreate(&stream);\n\n    /**\n     * Ensure dynamics and costs exist on GPU\n     */\n    dynamics.bindToStream(stream);\n    cost.bindToStream(stream);\n    // Call the GPU setup functions of the model and cost\n    dynamics.GPUSetup();\n    cost.GPUSetup();\n    fb_controller.GPUSetup();\n\n    for (int i = 0; i < x0.rows(); i++)\n    {\n      x0(i) = i * 0.1 + 0.2;\n    }\n\n    for (int sample = 0; sample < num_rollouts; sample++)\n    {\n      control[sample] = control_trajectory::Random();\n    }\n\n    // Create x init cuda array\n    HANDLE_ERROR(cudaMalloc((void**)&initial_state_d, sizeof(float) * CartpoleDynamics::STATE_DIM * 2));\n    // create control u trajectory cuda array\n    HANDLE_ERROR(\n        cudaMalloc((void**)&control_d, sizeof(float) * CartpoleDynamics::CONTROL_DIM * MAX_TIMESTEPS * num_rollouts));\n    // Create cost trajectory cuda array\n    HANDLE_ERROR(cudaMalloc((void**)&trajectory_costs_d, sizeof(float) * num_rollouts * (MAX_TIMESTEPS + 1) * 2));\n    // Create result state cuda array\n    
HANDLE_ERROR(cudaMalloc((void**)&result_state_d,\n                            sizeof(float) * num_rollouts * MAX_TIMESTEPS * CartpoleDynamics::STATE_DIM * 2));\n    // Create result state cuda array\n    HANDLE_ERROR(cudaMalloc((void**)&crash_status_d, sizeof(float) * num_rollouts * MAX_TIMESTEPS * 2));\n  }\n\n  void TearDown() override\n  {\n    cudaFree(initial_state_d);\n    cudaFree(control_d);\n    cudaFree(trajectory_costs_d);\n    cudaFree(crash_status_d);\n    cudaFree(result_state_d);\n  }\n\n  const static int MAX_TIMESTEPS = 100;\n\n  typedef Eigen::Matrix<float, CartpoleDynamics::STATE_DIM, MAX_TIMESTEPS> state_trajectory;      // A state trajectory\n  typedef Eigen::Matrix<float, CartpoleDynamics::CONTROL_DIM, MAX_TIMESTEPS> control_trajectory;  // A control\n                                                                                                  // trajectory\n  typedef Eigen::Matrix<float, MAX_TIMESTEPS + 1, 1> cost_trajectory;\n  typedef Eigen::Matrix<int, MAX_TIMESTEPS, 1> crash_status_trajectory;\n\n  CartpoleDynamics dynamics = CartpoleDynamics(1, 1, 1);\n  CartpoleQuadraticCost cost;\n  DDPFeedback<CartpoleDynamics, MAX_TIMESTEPS> fb_controller =\n      DDPFeedback<CartpoleDynamics, MAX_TIMESTEPS>(&dynamics, dt);\n\n  float dt = 0.02;\n  int num_rollouts = 20;\n  float lambda = 0.5;\n  float alpha = 0.001;\n\n  cudaStream_t stream;\n\n  float* initial_state_d;\n  float* control_d;\n  float* trajectory_costs_d;\n  float* result_state_d;\n  int* crash_status_d;\n\n  CartpoleDynamics::state_array x0;\n\n  std::vector<control_trajectory> control = std::vector<control_trajectory>(num_rollouts);\n  std::vector<state_trajectory> result_state = std::vector<state_trajectory>(num_rollouts);\n  std::vector<cost_trajectory> trajectory_costs = std::vector<cost_trajectory>(num_rollouts);\n  std::vector<crash_status_trajectory> crash_status = std::vector<crash_status_trajectory>(num_rollouts);\n};\n\n// TEST_F(VizualizationKernelsTest, 
stateAndCostTrajectoryKernelNoZNoFeedbackTest)\n// {\n//   for (int tdy = 1; tdy < 8; tdy++)\n//   {\n//     /**\n//      * Fill in GPU arrays\n//      */\n//     HANDLE_ERROR(cudaMemcpyAsync(initial_state_d, x0.data(), sizeof(float) * CartpoleDynamics::STATE_DIM,\n//                                  cudaMemcpyHostToDevice, stream));\n//     HANDLE_ERROR(cudaMemcpyAsync(initial_state_d + CartpoleDynamics::STATE_DIM, x0.data(),\n//                                  sizeof(float) * CartpoleDynamics::STATE_DIM, cudaMemcpyHostToDevice, stream));\n//     for (int i = 0; i < num_rollouts; i++)\n//     {\n//       HANDLE_ERROR(cudaMemcpyAsync(control_d + i * MAX_TIMESTEPS * CartpoleDynamics::CONTROL_DIM, control[i].data(),\n//                                    MAX_TIMESTEPS * CartpoleDynamics::CONTROL_DIM * sizeof(float),\n//                                    cudaMemcpyHostToDevice, stream));\n//     }\n//     HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n//     const int gridsize_x = (num_rollouts - 1) / 32 + 1;\n//     dim3 dimBlock(32, tdy, 1);\n//     dim3 dimGrid(gridsize_x, 1, 1);\n//     mppi_common::stateAndCostTrajectoryKernel<CartpoleDynamics, CartpoleQuadraticCost,\n//                                               DeviceDDP<CartpoleDynamics, MAX_TIMESTEPS>, 32, 1>\n//         <<<dimGrid, dimBlock, 0, stream>>>(dynamics.model_d_, cost.cost_d_, fb_controller.getDevicePointer(),\n//         control_d,\n//                                            initial_state_d, result_state_d, trajectory_costs_d, crash_status_d,\n//                                            num_rollouts, MAX_TIMESTEPS, dt, -1);\n\n//     // Copy the results back to the host\n//     for (int i = 0; i < num_rollouts; i++)\n//     {\n//       result_state[i].col(0) = x0;\n//       // shifted by one since we do not save the initial state\n//       HANDLE_ERROR(cudaMemcpyAsync(result_state[i].data() + (CartpoleDynamics::STATE_DIM),\n//                                    result_state_d + i * 
MAX_TIMESTEPS * CartpoleDynamics::STATE_DIM,\n//                                    (MAX_TIMESTEPS - 1) * CartpoleDynamics::STATE_DIM * sizeof(float),\n//                                    cudaMemcpyDeviceToHost, stream));\n//       HANDLE_ERROR(cudaMemcpyAsync(trajectory_costs[i].data(), trajectory_costs_d + i * (MAX_TIMESTEPS + 1),\n//                                    (MAX_TIMESTEPS + 1) * sizeof(float), cudaMemcpyDeviceToHost, stream));\n//       HANDLE_ERROR(cudaMemcpyAsync(crash_status[i].data(), crash_status_d + i * MAX_TIMESTEPS,\n//                                    MAX_TIMESTEPS * sizeof(float), cudaMemcpyDeviceToHost, stream));\n//     }\n//     HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n//     for (int sample = 0; sample < num_rollouts; sample++)\n//     {\n//       CartpoleDynamics::state_array x = x0;\n//       CartpoleDynamics::state_array x_dot;\n//       control_trajectory u_traj = control[sample];\n//       int crash_status_val = 0;\n\n//       int t = 0;\n//       for (; t < MAX_TIMESTEPS; t++)\n//       {\n//         EXPECT_NEAR(x(0), result_state[sample].col(t)(0), 1e-5)\n//             << \"\\ntdy: \" << tdy << \"\\nsample: \" << sample << \"\\nat time: \" << t;\n//         EXPECT_NEAR(x(1), result_state[sample].col(t)(1), 1e-5)\n//             << \"\\ntdy: \" << tdy << \"\\nsample: \" << sample << \"\\nat time: \" << t;\n//         EXPECT_NEAR(x(2), result_state[sample].col(t)(2), 1e-5)\n//             << \"\\ntdy: \" << tdy << \"\\nsample: \" << sample << \"\\nat time: \" << t;\n//         EXPECT_NEAR(x(3), result_state[sample].col(t)(3), 1e-5)\n//             << \"\\ntdy: \" << tdy << \"\\nsample: \" << sample << \"\\nat time: \" << t;\n\n//         CartpoleDynamics::control_array u = u_traj.col(t);\n//         float cost_val = cost.computeStateCost(x, t, &crash_status_val);\n//         if (t == 0)\n//         {\n//           // don't weight the first state\n//           EXPECT_FLOAT_EQ(trajectory_costs[sample](t), 0);\n//         
}\n//         else\n//         {\n//           EXPECT_FLOAT_EQ(cost_val, trajectory_costs[sample](t)) << \"\\nsample: \" << sample << \"\\nat time: \" << t;\n//         }\n//         EXPECT_EQ(crash_status_val, crash_status[sample](t)) << \"\\nsample: \" << sample << \"\\nat time: \" << t;\n\n//         dynamics.enforceConstraints(x, u);\n//         dynamics.computeStateDeriv(x, u, x_dot);\n//         dynamics.updateState(x, x_dot, dt);\n//       }\n//       float terminal_cost = cost.terminalCost(x);\n//       EXPECT_FLOAT_EQ(terminal_cost, trajectory_costs[sample](t)) << \"\\nsample: \" << sample << \"\\nat terminal\";\n//     }\n//   }\n// }\n"
  },
  {
    "path": "tests/mppi_core/weightedreduction_kernel_tests.cu",
    "content": "#include <gtest/gtest.h>\n#include <kernel_tests/core/weightedreduction_kernel_test.cuh>\n#include <mppi/utils/test_helper.h>\n#include <random>\n\nTEST(WeightedReductionKernel, setInitialControlToZero)\n{\n  const int num_threads = 100;\n  const int control_dim = 5;\n\n  std::array<float, control_dim> u_host = { 1 };\n  std::array<float, num_threads* control_dim> u_intermediate_host = { 1 };\n\n  launchSetInitialControlToZero_KernelTest<num_threads, control_dim>(u_host, u_intermediate_host);\n\n  array_assert_float_eq<control_dim>(0.f, u_host);\n  array_assert_float_eq<control_dim * num_threads>(0.f, u_intermediate_host);\n}\n\nTEST(WeightedReductionKernel, strideControlWeightReduction)\n{\n  auto generator = std::default_random_engine(7.0);\n  auto distribution = std::normal_distribution<float>(1.0, 0.2);\n  const int sum_stride = 64;\n  float normalizer = 1000;\n\n  const int num_rollouts = 1024;\n  const int control_dim = 6;\n  const int num_timesteps = 100;\n\n  std::array<float, num_rollouts> exp_costs_host = { 1 };\n  std::array<float, num_rollouts* num_timesteps* control_dim> v_host = { 1 };  // Disturbed control\n\n  for (size_t i = 1; i < exp_costs_host.size(); ++i)\n  {\n    exp_costs_host[i] = 0.001 * distribution(generator);\n  }\n\n  for (size_t i = 1; i < v_host.size(); ++i)\n  {\n    v_host[i] = distribution(generator);\n  }\n\n  const int cell_size = ((num_rollouts - 1) / sum_stride + 1);\n  std::array<float, cell_size* num_timesteps* control_dim> u_intermediate_host = { 0 };\n\n  launchStrideControlWeightReduction_KernelTest<control_dim, num_rollouts, num_timesteps, sum_stride>(\n      normalizer, exp_costs_host, v_host, u_intermediate_host);\n  CudaCheckError();\n\n  std::array<float, num_timesteps*((num_rollouts - 1) / sum_stride + 1)* control_dim> u_intermediate_known = { 0 };\n\n  // Compute weights per rollouts\n  std::array<float, num_rollouts> rollout_weights = { 0 };\n  for (size_t i = 0; i < rollout_weights.size(); ++i)\n  
{\n    rollout_weights[i] = exp_costs_host[i] / normalizer;\n  }\n\n  // Weight all the controls with the appropriate cost\n  std::array<float, num_timesteps* num_rollouts* control_dim> weighted_controls = { 0 };\n  for (int i = 0; i < num_rollouts; ++i)\n  {\n    for (int j = 0; j < num_timesteps; ++j)\n    {\n      for (int k = 0; k < control_dim; ++k)\n      {\n        int index = i * num_timesteps * control_dim + j * control_dim + k;\n        weighted_controls[index] = rollout_weights[i] * v_host[index];\n      }\n    }\n  }\n\n  // Iterate through each stride\n  for (int i = 0; i < ((num_rollouts - 1) / sum_stride + 1); ++i)\n  {  // i is the cell index\n    for (int j = 0; j < sum_stride; ++j)\n    {  // j is the stride index\n      for (int k = 0; k < num_timesteps; ++k)\n      {  // k specifies the current timestep\n        for (int w = 0; w < control_dim; ++w)\n        {\n          int rollout_index = (i * sum_stride + j);\n          if (rollout_index < num_rollouts)\n          {\n            u_intermediate_known[k * cell_size * control_dim + i * control_dim + w] +=\n                weighted_controls[rollout_index * num_timesteps * control_dim + k * control_dim + w];\n          }\n        }\n      }\n    }\n  }\n\n  array_assert_float_eq<cell_size * num_timesteps * control_dim>(u_intermediate_known, u_intermediate_host);\n}\n\nTEST(WeightedReductionKernel, rolloutWeightReductionAndSaveControl)\n{\n  auto generator = std::default_random_engine(7.0);\n  auto distribution = std::normal_distribution<float>(1.0, 0.2);\n  const int sum_stride = 64;\n\n  const int num_rollouts = 1024;\n  const int control_dim = 6;\n  const int num_timesteps = 100;\n  const int cell_size = ((num_rollouts - 1) / sum_stride + 1);\n  std::array<float, cell_size* num_timesteps* control_dim> u_intermediate_host = { 0 };\n  std::array<float, num_timesteps* control_dim> du_new_compute = { 0 };  // Update control\n  std::array<float, num_timesteps* control_dim> du_new_known = { 0 };\n\n  
for (size_t i = 0; i < u_intermediate_host.size(); ++i)\n  {\n    u_intermediate_host[i] = distribution(generator);\n  }\n\n  // Compute the control update\n  for (int i = 0; i < ((num_rollouts - 1) / sum_stride + 1); ++i)\n  {  // i is the cell index\n    for (int k = 0; k < num_timesteps; ++k)\n    {  // k specifies the current timestep\n      for (int w = 0; w < control_dim; ++w)\n      {\n        du_new_known[k * control_dim + w] += u_intermediate_host[k * cell_size * control_dim + i * control_dim + w];\n      }\n    }\n  }\n\n  // Launch the test kernel\n  launchRolloutWeightReductionAndSaveControl_KernelTest<control_dim, num_rollouts, num_timesteps, sum_stride>(\n      u_intermediate_host, du_new_compute);\n\n  array_assert_float_eq<num_timesteps * control_dim>(du_new_known, du_new_compute);\n}\n\nTEST(WeightedReductionKernel, comparisonTestAutorallyMPPI_Generic)\n{\n  auto generator = std::default_random_engine(7.0);\n  auto distribution = std::normal_distribution<float>(5.0, 1.2);\n  const int sum_stride = 64;\n\n  const int num_rollouts = 1024;\n  const int control_dim = 4;\n  const int num_timesteps = 100;\n\n  std::array<float, num_rollouts> exp_costs;\n  std::array<float, control_dim * num_rollouts * num_timesteps> perturbed_controls;\n  std::array<float, control_dim * num_timesteps> controls_out_autorally;\n  std::array<float, control_dim * num_timesteps> controls_out_mppi_generic;\n\n  // Initialize the exp costs with positive numbers\n  for (float& exp_cost : exp_costs)\n  {\n    exp_cost = expf(-1 * distribution(generator));\n  }\n  exp_costs[0] = 0;  // Minimum cost\n\n  // Initialize the control perturbations with random numbers\n  for (float& control : perturbed_controls)\n  {\n    control = distribution(generator);\n  }\n\n  // The normalizer is the sum of all exponential costs;\n  float normalizer = std::accumulate(exp_costs.begin(), exp_costs.end(), 0.0);\n\n  launchWeightedReductionKernelTest<control_dim, num_rollouts, sum_stride, 
num_timesteps>(\n      exp_costs, perturbed_controls, normalizer, controls_out_mppi_generic, 0);\n\n  launchAutoRallyWeightedReductionKernelTest<control_dim, num_rollouts, sum_stride, num_timesteps>(\n      exp_costs, perturbed_controls, normalizer, controls_out_autorally, 0);\n\n  array_expect_float_eq<num_timesteps * control_dim>(controls_out_mppi_generic, controls_out_autorally);\n}\n"
  },
  {
    "path": "tests/nn_helpers/CMakeLists.txt",
    "content": "set(GTEST_LIBRARIES gtest gmock gtest_main)\nfile(GLOB TARGET_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cu)\n\nforeach(T_FILE IN LISTS TARGET_SRCS)\n  get_filename_component(T_NAME ${T_FILE} NAME_WE)\n  add_executable(${T_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp ${T_FILE})\n  target_link_libraries(${T_NAME}\n          ${GTEST_LIBRARIES}\n          ${MPPI_HEADER_LIBRARY_NAME})\n  gtest_discover_tests(${T_NAME})\n  set_target_properties(${T_NAME} PROPERTIES FOLDER test)\nendforeach()\n"
  },
  {
    "path": "tests/nn_helpers/activation_functions_tests.cu",
    "content": "#include <gtest/gtest.h>\n\n#include <mppi/utils/test_helper.h>\n#include <random>\n#include <algorithm>\n#include <math.h>\n#include <cmath>\n\n#include <mppi/utils/activation_functions.cuh>\n#include <mppi/utils/gpu_err_chk.cuh>\n#include \"mppi/utils/math_utils.h\"\n\nclass ActivationFunctionTest : public testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    generator = std::default_random_engine(7.0);\n    distribution = std::normal_distribution<float>(0.0, 2.0);\n  }\n\n  void TearDown() override\n  {\n  }\n\n  std::default_random_engine generator;\n  std::normal_distribution<float> distribution;\n};\n\nTEST_F(ActivationFunctionTest, TanhCPU)\n{\n  for (int i = 0; i < 1e5; i++)\n  {\n    float num = distribution(generator);\n    EXPECT_FLOAT_EQ(mppi::nn::tanh(num), std::tanh(num));\n  }\n}\n\n__global__ void tanhTestKernel(float* input, int num, int times)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num)\n  {\n    for (int i = 0; i < times; i++)\n    {\n      input[tid] = mppi::nn::tanh(input[tid]);\n    }\n  }\n}\n\n__global__ void tanhStableTestKernel(float* input, int num, int times)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num)\n  {\n    for (int i = 0; i < times; i++)\n    {\n      input[tid] = mppi::nn::tanh_accurate(input[tid]);\n    }\n  }\n}\n\ntemplate <int BLOCKDIM_X = 128>\nfloat launchTanhTestKernel(std::vector<float>& input, int times = 100)\n{\n  float* input_d;\n  float* input_stable_d;\n  int count = input.size();\n  HANDLE_ERROR(cudaMalloc((void**)&input_d, sizeof(float) * count));\n  HANDLE_ERROR(cudaMalloc((void**)&input_stable_d, sizeof(float) * count));\n  HANDLE_ERROR(cudaMemcpy(input_d, input.data(), sizeof(float) * count, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(input_stable_d, input.data(), sizeof(float) * count, cudaMemcpyHostToDevice));\n\n  const int gridsize_x = (count - 1) / BLOCKDIM_X + 1;\n  dim3 threadsPerBlock(BLOCKDIM_X);\n  dim3 
numBlocks(gridsize_x, 1);\n\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  cudaEvent_t start, stop;\n  float time_stable, time_fast;\n  cudaEventCreate(&start);\n  cudaEventCreate(&stop);\n  std::cout << \"\\n===== TANH ======\\n\";\n\n  cudaEventRecord(start, stream);\n  tanhTestKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(input_d, count, times);\n  cudaEventRecord(stop, stream);\n  cudaEventSynchronize(stop);\n  cudaEventElapsedTime(&time_fast, start, stop);\n  std::cout << \"time to compute fast \" << time_fast << std::endl;\n\n  cudaEventRecord(start, stream);\n  tanhStableTestKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(input_stable_d, count, times);\n  cudaEventRecord(stop, stream);\n  cudaEventSynchronize(stop);\n  cudaEventElapsedTime(&time_stable, start, stop);\n  std::cout << \"time to compute stable \" << time_stable << std::endl;\n\n  std::cout << \"time difference: \" << time_stable - time_fast << std::endl;\n\n  HANDLE_ERROR(cudaMemcpy(input.data(), input_d, sizeof(float) * count, cudaMemcpyDeviceToHost));\n  cudaFree(input_d);\n  cudaFree(input_stable_d);\n\n  return time_stable - time_fast;\n}\n\nTEST_F(ActivationFunctionTest, TanhGPU)\n{\n  std::vector<float> vec(1e8);\n  for (int i = 0; i < vec.size(); i++)\n  {\n    vec[i] = distribution(generator);\n  }\n  std::vector<float> output_vec = vec;\n  launchTanhTestKernel(output_vec, 1);\n  for (int i = 0; i < vec.size(); i++)\n  {\n    EXPECT_NEAR(output_vec[i], std::tanh(vec[i]), 2.0e-7);\n  }\n\n  float average = 0;\n  for (int i = 0; i < 10; i++)\n  {\n    average += launchTanhTestKernel(output_vec);\n  }\n  average /= 10.0f;\n  std::cout << \"got average diff \" << average << std::endl;\n  // below condition is kind of a janky hardcode, might change on different systems\n  EXPECT_LT(std::abs(average), 3);\n  EXPECT_LT(average, 3);  // in ms\n  EXPECT_GT(average, -3);\n}\n\nTEST_F(ActivationFunctionTest, SigmoidCPU)\n{\n  for (int i = 0; i < 1e5; i++)\n  {\n    
float num = distribution(generator);\n    EXPECT_FLOAT_EQ(mppi::nn::sigmoid(num), (1.0f / (1.0f + std::exp(-num))));\n  }\n}\n\n__global__ void sigmoidTestKernel(float* input, int num, int times)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num)\n  {\n    for (int i = 0; i < times; i++)\n    {\n      input[tid] = mppi::nn::sigmoid(input[tid]);\n    }\n  }\n}\n\n__global__ void sigmoidStableTestKernel(float* input, int num, int times)\n{\n  int tid = blockIdx.x * blockDim.x + threadIdx.x;\n  if (tid < num)\n  {\n    for (int i = 0; i < times; i++)\n    {\n      input[tid] = mppi::nn::sigmoid_accurate(input[tid]);\n    }\n  }\n}\n\ntemplate <int BLOCKDIM_X = 128>\nfloat launchSigmoidTestKernel(std::vector<float>& input, int times = 100)\n{\n  float* input_d;\n  float* input_stable_d;\n  int count = input.size();\n  HANDLE_ERROR(cudaMalloc((void**)&input_d, sizeof(float) * count));\n  HANDLE_ERROR(cudaMalloc((void**)&input_stable_d, sizeof(float) * count));\n  HANDLE_ERROR(cudaMemcpy(input_d, input.data(), sizeof(float) * count, cudaMemcpyHostToDevice));\n  HANDLE_ERROR(cudaMemcpy(input_stable_d, input.data(), sizeof(float) * count, cudaMemcpyHostToDevice));\n\n  const int gridsize_x = (count - 1) / BLOCKDIM_X + 1;\n  dim3 threadsPerBlock(BLOCKDIM_X);\n  dim3 numBlocks(gridsize_x, 1);\n\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  cudaEvent_t start, stop;\n  float time_fast, time_stable;\n  cudaEventCreate(&start);\n  cudaEventCreate(&stop);\n  std::cout << \"\\n===== SIGMOID ======\\n\";\n\n  cudaEventRecord(start, stream);\n  sigmoidTestKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(input_d, count, times);\n  cudaEventRecord(stop, stream);\n  cudaEventSynchronize(stop);\n  cudaEventElapsedTime(&time_fast, start, stop);\n  std::cout << \"time to compute fast \" << time_fast << std::endl;\n\n  cudaEventRecord(start, stream);\n  sigmoidStableTestKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(input_stable_d, 
count, times);\n  cudaEventRecord(stop, stream);\n  cudaEventSynchronize(stop);\n  cudaEventElapsedTime(&time_stable, start, stop);\n  std::cout << \"time to compute stable \" << time_stable << std::endl;\n\n  std::cout << \"time difference: \" << time_stable - time_fast << std::endl;\n\n  HANDLE_ERROR(cudaMemcpy(input.data(), input_d, sizeof(float) * count, cudaMemcpyDeviceToHost));\n  cudaFree(input_d);\n  cudaFree(input_stable_d);\n\n  return time_stable - time_fast;\n}\n\nTEST_F(ActivationFunctionTest, SigmoidGPU)\n{\n  std::vector<float> vec(1e8);\n  for (int i = 0; i < vec.size(); i++)\n  {\n    vec[i] = distribution(generator);\n  }\n  std::vector<float> output_vec = vec;\n  launchSigmoidTestKernel(output_vec, 1);\n  for (int i = 0; i < vec.size(); i++)\n  {\n    EXPECT_NEAR(output_vec[i], (1.0f / (1.0f + std::exp(-vec[i]))), 2.0e-7);\n  }\n\n  float average = 0.0f;\n  for (int i = 0; i < 10; i++)\n  {\n    average += launchSigmoidTestKernel(output_vec);\n  }\n  average /= 10.0f;\n  std::cout << \"got average difference \" << average << std::endl;\n  EXPECT_GT(average, 0.0f);\n  // below condition is kind of a janky hardcode, might change on different systems\n  // if should still be faster to use the faster version, so number should be positive\n  EXPECT_GT(average, 1.0f);  // in ms\n}\n"
  },
  {
    "path": "tests/nn_helpers/fnn_helper_test.cu",
    "content": "#include <gtest/gtest.h>\n\n#include <mppi/utils/test_helper.h>\n#include <random>\n#include <algorithm>\n#include <math.h>\n\n#include <mppi/utils/nn_helpers/fnn_helper.cuh>\n// Auto-generated header file\n#include <test_networks.h>\n#include <kernel_tests/utils/network_helper_kernel_test.cuh>\n#include <unsupported/Eigen/NumericalDiff>\n#include \"mppi/ddp/ddp_dynamics.h\"\n\nclass FNNHelperTest : public testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    generator = std::default_random_engine(7.0);\n    distribution = std::normal_distribution<float>(0.0, 1.0);\n  }\n\n  void TearDown() override\n  {\n  }\n\n  std::default_random_engine generator;\n  std::normal_distribution<float> distribution;\n};\n\nTEST_F(FNNHelperTest, ParamsConstructor1)\n{\n  std::vector<int> vec = { 5, 10, 3 };\n  FNNHelper<> helper(vec);\n\n  EXPECT_EQ(helper.getNumLayers(), 3);\n  EXPECT_EQ(helper.getLargestLayer(), 10);\n  EXPECT_EQ(helper.getNumParams(), 93);\n\n  auto stride = helper.getStrideIdcsPtr();\n  EXPECT_EQ(stride[0], 0);\n  EXPECT_EQ(stride[1], 50);\n  EXPECT_EQ(stride[2], 60);\n  EXPECT_EQ(stride[3], 90);\n\n  auto structure = helper.getNetStructurePtr();\n  EXPECT_EQ(structure[0], 5);\n  EXPECT_EQ(structure[1], 10);\n  EXPECT_EQ(structure[2], 3);\n\n  auto theta = helper.getThetaPtr();\n  for (int i = 0; i < 93; i++)\n  {\n    EXPECT_FLOAT_EQ(theta[i], 0.0f);\n  }\n\n  const int shared_mem_grd = helper.getGrdSharedSizeBytes();\n  const int shared_mem_blk = helper.getBlkSharedSizeBytes();\n  EXPECT_EQ(shared_mem_grd, 400);\n  EXPECT_EQ(shared_mem_blk, 80);\n}\n\nTEST_F(FNNHelperTest, ParamsConstructor2)\n{\n  std::vector<int> layers = { 5, 10, 20, 3, 3 };\n  FNNHelper<> helper(layers);\n  EXPECT_EQ(helper.getNumLayers(), 5);\n  EXPECT_EQ(helper.getNumParams(), 355);\n  EXPECT_EQ(helper.getLargestLayer(), 20);\n\n  auto stride = helper.getStrideIdcsPtr();\n  EXPECT_EQ(stride[0], 0);\n  EXPECT_EQ(stride[1], 50);\n  EXPECT_EQ(stride[2], 60);\n  
EXPECT_EQ(stride[3], 260);\n  EXPECT_EQ(stride[4], 280);\n  EXPECT_EQ(stride[5], 340);\n  EXPECT_EQ(stride[6], 343);\n  EXPECT_EQ(stride[7], 352);\n\n  auto structure = helper.getNetStructurePtr();\n  EXPECT_EQ(structure[0], 5);\n  EXPECT_EQ(structure[1], 10);\n  EXPECT_EQ(structure[2], 20);\n  EXPECT_EQ(structure[3], 3);\n  EXPECT_EQ(structure[4], 3);\n\n  auto theta = helper.getThetaPtr();\n  for (int i = 0; i < 355; i++)\n  {\n    EXPECT_FLOAT_EQ(theta[i], 0.0f);\n  }\n\n  const int shared_mem_grd = helper.getGrdSharedSizeBytes();\n  const int shared_mem_blk = helper.getBlkSharedSizeBytes();\n  EXPECT_EQ(shared_mem_grd, 1472);\n  EXPECT_EQ(shared_mem_blk, 160);\n}\n\nTEST_F(FNNHelperTest, ParamsConstructor3)\n{\n  std::vector<int> layers = { 5, 3 };\n  FNNHelper<> helper(layers);\n\n  EXPECT_EQ(helper.getNumLayers(), 2);\n  EXPECT_EQ(helper.getLargestLayer(), 5);\n  EXPECT_EQ(helper.getNumParams(), 18);\n  EXPECT_EQ(helper.getGrdSharedSizeBytes(), 96);\n  EXPECT_EQ(helper.getBlkSharedSizeBytes(), 12 * 4);\n  EXPECT_EQ(5, helper.getInputDim());\n  EXPECT_EQ(3, helper.getOutputDim());\n\n  auto stride = helper.getStrideIdcsPtr();\n  EXPECT_EQ(stride[0], 0);\n  EXPECT_EQ(stride[1], 15);\n\n  auto structure = helper.getNetStructurePtr();\n  EXPECT_EQ(structure[0], 5);\n  EXPECT_EQ(structure[1], 3);\n\n  auto theta = helper.getThetaPtr();\n  for (int i = 0; i < 18; i++)\n  {\n    EXPECT_FLOAT_EQ(theta[i], 0.0f);\n  }\n}\n\nTEST_F(FNNHelperTest, ParamsConstructor4)\n{\n  std::vector<int> layers = { 5, 3 };\n  FNNHelper<false> helper(layers);\n\n  EXPECT_EQ(helper.getGrdSharedSizeBytes(), 0);\n  EXPECT_EQ(helper.getBlkSharedSizeBytes(), 12 * 4);\n}\n\nTEST_F(FNNHelperTest, BindStream)\n{\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  std::vector<int> layers = { 5, 3 };\n  FNNHelper<> helper(layers, stream);\n\n  EXPECT_EQ(helper.stream_, stream);\n}\n\nTEST_F(FNNHelperTest, GPUSetupAndParamsCheck)\n{\n  std::vector<int> layers{ 6, 32, 32, 4 
};\n  FNNHelper<> model(layers);\n\n  float* theta = model.getThetaPtr();\n  int* stride = model.getStrideIdcsPtr();\n  int* net_structure = model.getNetStructurePtr();\n\n  std::array<float, 1412> theta_result = {};\n  std::array<int, 6> stride_result = {};\n  std::array<int, 4> net_structure_result = {};\n  std::array<float, 1412> shared_theta_result = {};\n  std::array<int, 6> shared_stride_result = {};\n  std::array<int, 4> shared_net_structure_result = {};\n\n  EXPECT_EQ(model.GPUMemStatus_, false);\n  EXPECT_EQ(model.network_d_, nullptr);\n  EXPECT_NE(model.getGrdSharedSizeBytes(), 0);\n\n  model.GPUSetup();\n\n  EXPECT_NE(model.getGrdSharedSizeBytes(), 0);\n  EXPECT_EQ(model.GPUMemStatus_, true);\n  EXPECT_NE(model.network_d_, nullptr);\n\n  // launch kernel\n  launchParameterCheckTestKernel<FNNHelper<>, 1412, 6, 4>(model, theta_result, stride_result, net_structure_result,\n                                                          shared_theta_result, shared_stride_result,\n                                                          shared_net_structure_result);\n\n  for (int i = 0; i < 1412; i++)\n  {\n    // these are a bunch of mostly random values and nan != nan\n    EXPECT_FLOAT_EQ(theta_result[i], theta[i]);\n    EXPECT_FLOAT_EQ(shared_theta_result[i], theta[i]);\n  }\n  for (int i = 0; i < 6; i++)\n  {\n    EXPECT_EQ(stride_result[i], stride[i]);\n    EXPECT_EQ(shared_stride_result[i], stride[i]);\n  }\n\n  for (int i = 0; i < 4; i++)\n  {\n    EXPECT_EQ(net_structure[i], net_structure_result[i]);\n    EXPECT_EQ(net_structure[i], shared_net_structure_result[i]);\n  }\n}\n\nTEST_F(FNNHelperTest, UpdateModelTest)\n{\n  std::vector<int> layers{ 6, 32, 32, 4 };\n  FNNHelper<> model(layers);\n  float* theta = model.getThetaPtr();\n  int* stride = model.getStrideIdcsPtr();\n  int* net_structure = model.getNetStructurePtr();\n\n  std::array<float, 1412> theta_result = {};\n  std::array<int, 6> stride_result = {};\n  std::array<int, 4> net_structure_result = 
{};\n  std::array<float, 1412> shared_theta_result = {};\n  std::array<int, 6> shared_stride_result = {};\n  std::array<int, 4> shared_net_structure_result = {};\n\n  model.GPUSetup();\n\n  std::vector<float> theta_vec(1412);\n  for (int i = 0; i < 1412; i++)\n  {\n    theta_vec[i] = distribution(generator);\n  }\n\n  model.updateModel({ 6, 32, 32, 4 }, theta_vec);\n\n  // check CPU\n  for (int i = 0; i < 1412; i++)\n  {\n    // these are a bunch of mostly random values and nan != nan\n    EXPECT_FLOAT_EQ(model.getThetaPtr()[i], theta_vec[i]);\n  }\n\n  // launch kernel\n  launchParameterCheckTestKernel<FNNHelper<>, 1412, 6, 4>(model, theta_result, stride_result, net_structure_result,\n                                                          shared_theta_result, shared_stride_result,\n                                                          shared_net_structure_result);\n\n  for (int i = 0; i < 1412; i++)\n  {\n    // these are a bunch of mostly random values and nan != nan\n    EXPECT_FLOAT_EQ(theta_result[i], theta_vec[i]);\n    EXPECT_FLOAT_EQ(shared_theta_result[i], theta_vec[i]);\n  }\n  for (int i = 0; i < 6; i++)\n  {\n    EXPECT_EQ(stride_result[i], stride[i]);\n    EXPECT_EQ(shared_stride_result[i], stride[i]);\n  }\n\n  for (int i = 0; i < 4; i++)\n  {\n    EXPECT_EQ(net_structure[i], net_structure_result[i]);\n    EXPECT_EQ(net_structure[i], shared_net_structure_result[i]);\n  }\n}\n\nTEST_F(FNNHelperTest, LoadModelTest)\n{\n  std::vector<int> layers{ 6, 4 };\n  FNNHelper<> model(layers);\n  model.GPUSetup();\n\n  std::string path = mppi::tests::test_load_nn_file;\n  model.loadParams(path);\n\n  std::array<int, 6> stride = { 0, 192, 224, 1248, 1280, 1408 };\n  std::array<int, 4> net_structure = { 6, 32, 32, 4 };\n\n  // check CPU\n  for (int i = 0; i < 1412; i++)\n  {\n    EXPECT_FLOAT_EQ(model.getThetaPtr()[i], i) << \"failed at index \" << i;\n  }\n  for (int i = 0; i < 6; i++)\n  {\n    EXPECT_EQ(model.getStrideIdcsPtr()[i], stride[i]);\n  }\n  for 
(int i = 0; i < 4; i++)\n  {\n    EXPECT_EQ(net_structure[i], model.getNetStructurePtr()[i]);\n  }\n\n  std::array<float, 1412> theta_result = {};\n  std::array<int, 6> stride_result = {};\n  std::array<int, 4> net_structure_result = {};\n  std::array<float, 1412> shared_theta_result = {};\n  std::array<int, 6> shared_stride_result = {};\n  std::array<int, 4> shared_net_structure_result = {};\n\n  // launch kernel\n  launchParameterCheckTestKernel<FNNHelper<>, 1412, 6, 4>(model, theta_result, stride_result, net_structure_result,\n                                                          shared_theta_result, shared_stride_result,\n                                                          shared_net_structure_result);\n\n  for (int i = 0; i < 1412; i++)\n  {\n    EXPECT_FLOAT_EQ(theta_result[i], i) << \"failed at index \" << i;\n    EXPECT_FLOAT_EQ(shared_theta_result[i], i) << \"failed at index \" << i;\n  }\n  for (int i = 0; i < 6; i++)\n  {\n    EXPECT_EQ(stride_result[i], stride[i]);\n    EXPECT_EQ(shared_stride_result[i], stride[i]);\n  }\n  for (int i = 0; i < 4; i++)\n  {\n    EXPECT_EQ(net_structure[i], net_structure_result[i]);\n    EXPECT_EQ(net_structure[i], shared_net_structure_result[i]);\n  }\n\n  FNNHelper<> model2(path);\n  model2.GPUSetup();\n\n  // launch kernel\n  launchParameterCheckTestKernel<FNNHelper<>, 1412, 6, 4>(model2, theta_result, stride_result, net_structure_result,\n                                                          shared_theta_result, shared_stride_result,\n                                                          shared_net_structure_result);\n\n  for (int i = 0; i < 1412; i++)\n  {\n    EXPECT_FLOAT_EQ(theta_result[i], i) << \"failed at index \" << i;\n    EXPECT_FLOAT_EQ(shared_theta_result[i], i) << \"failed at index \" << i;\n  }\n  for (int i = 0; i < 6; i++)\n  {\n    EXPECT_EQ(stride_result[i], stride[i]);\n    EXPECT_EQ(shared_stride_result[i], stride[i]);\n  }\n  for (int i = 0; i < 4; i++)\n  {\n    
EXPECT_EQ(net_structure[i], net_structure_result[i]);\n    EXPECT_EQ(net_structure[i], shared_net_structure_result[i]);\n  }\n}\n\nTEST_F(FNNHelperTest, LoadModelNPZTest)\n{\n  std::vector<int> layers{ 6, 4 };\n  FNNHelper<> model(layers);\n  model.GPUSetup();\n\n  std::string path = mppi::tests::test_load_nn_file;\n  if (!fileExists(path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << path.c_str();\n    exit(-1);\n  }\n  cnpy::npz_t param_dict = cnpy::npz_load(path);\n  model.loadParams(param_dict);\n\n  // check CPU\n  for (int i = 0; i < 1412; i++)\n  {\n    EXPECT_FLOAT_EQ(model.getThetaPtr()[i], i) << \"failed at index \" << i;\n  }\n\n  std::array<float, 1412> theta_result = {};\n  std::array<int, 6> stride_result = {};\n  std::array<int, 4> net_structure_result = {};\n  std::array<float, 1412> shared_theta_result = {};\n  std::array<int, 6> shared_stride_result = {};\n  std::array<int, 4> shared_net_structure_result = {};\n\n  std::array<int, 6> stride = { 0, 192, 224, 1248, 1280, 1408 };\n  std::array<int, 4> net_structure = { 6, 32, 32, 4 };\n\n  // launch kernel\n  launchParameterCheckTestKernel<FNNHelper<>, 1412, 6, 4>(model, theta_result, stride_result, net_structure_result,\n                                                          shared_theta_result, shared_stride_result,\n                                                          shared_net_structure_result);\n\n  for (int i = 0; i < 1412; i++)\n  {\n    EXPECT_FLOAT_EQ(theta_result[i], i) << \"failed at index \" << i;\n    EXPECT_FLOAT_EQ(shared_theta_result[i], i) << \"failed at index \" << i;\n  }\n  for (int i = 0; i < 6; i++)\n  {\n    EXPECT_EQ(stride_result[i], stride[i]);\n    EXPECT_EQ(shared_stride_result[i], stride[i]);\n  }\n  for (int i = 0; i < 4; i++)\n  {\n    EXPECT_EQ(net_structure[i], net_structure_result[i]);\n    EXPECT_EQ(net_structure[i], shared_net_structure_result[i]);\n  }\n}\n\nTEST_F(FNNHelperTest, LoadModelNPZTestNested)\n{\n  std::vector<int> 
layers{ 28, 30, 30, 2 };\n  FNNHelper<> model(layers);\n  model.GPUSetup();\n\n  std::string path = mppi::tests::test_lstm_lstm;\n  if (!fileExists(path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << path.c_str();\n    exit(-1);\n  }\n  cnpy::npz_t param_dict = cnpy::npz_load(path);\n  model.loadParams(\"output\", param_dict);\n}\n\nTEST_F(FNNHelperTest, forwardCPU)\n{\n  std::vector<int> layers{ 6, 32, 32, 4 };\n  FNNHelper<> model(layers);\n  Eigen::MatrixXf input = model.getZeroInputVector();\n  input.setZero();\n  Eigen::MatrixXf output = model.getZeroOutputVector();\n\n  std::vector<float> theta(1412);\n  std::fill(theta.begin(), theta.end(), 1);\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output(0), 33);\n  EXPECT_FLOAT_EQ(output(1), 33);\n  EXPECT_FLOAT_EQ(output(2), 33);\n  EXPECT_FLOAT_EQ(output(3), 33);\n\n  // modify bias\n  theta[1408] = 2.0;\n  theta[1409] = 3.0;\n  theta[1410] = 4.0;\n  theta[1411] = 5.0;\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output(0), 34);\n  EXPECT_FLOAT_EQ(output(1), 35);\n  EXPECT_FLOAT_EQ(output(2), 36);\n  EXPECT_FLOAT_EQ(output(3), 37);\n\n  // modify weight\n  theta[1280] = 2.0;\n  theta[1312] = 2.0;\n  theta[1344] = 2.0;\n  theta[1376] = 2.0;\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output(0), 35);\n  EXPECT_FLOAT_EQ(output(1), 36);\n  EXPECT_FLOAT_EQ(output(2), 37);\n  EXPECT_FLOAT_EQ(output(3), 38);\n}\n\nTEST_F(FNNHelperTest, forwardGPU)\n{\n  const int num_rollouts = 1000;\n\n  std::vector<int> layers{ 6, 32, 32, 4 };\n  FNNHelper<> model(layers);\n  model.GPUSetup();\n\n  std::vector<float> theta(1412);\n  std::fill(theta.begin(), theta.end(), 1);\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  Eigen::Matrix<float, 6, num_rollouts> inputs;\n  inputs = Eigen::Matrix<float, 6, num_rollouts>::Zero();\n\n  
std::vector<std::array<float, 6>> input_arr(num_rollouts);\n  std::vector<std::array<float, 4>> output_arr(num_rollouts);\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < input_arr[0].size(); dim++)\n      {\n        input_arr[state_index][dim] = inputs.col(state_index)(dim);\n      }\n    }\n\n    launchForwardTestKernel<FNNHelper<>, 6, 4, 32>(model, input_arr, output_arr, y_dim);\n    for (int point = 0; point < num_rollouts; point++)\n    {\n      Eigen::MatrixXf input = inputs.col(point);\n      Eigen::MatrixXf output = model.getZeroOutputVector();\n\n      model.forward(input, output);\n      for (int dim = 0; dim < 6; dim++)\n      {\n        EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n      }\n      for (int dim = 0; dim < 4; dim++)\n      {\n        EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(output_arr[point][dim]));\n        EXPECT_FLOAT_EQ(output(dim), 33);\n      }\n    }\n  }\n}\n\nTEST_F(FNNHelperTest, forwardGPUCompare)\n{\n  const int num_rollouts = 1000;\n\n  std::vector<int> layers{ 6, 32, 32, 4 };\n  FNNHelper<> model(layers);\n  model.GPUSetup();\n\n  std::vector<float> theta(1412);\n  for (int i = 0; i < 1412; i++)\n  {\n    theta[i] = distribution(generator);\n  }\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  Eigen::Matrix<float, 6, num_rollouts> inputs;\n  inputs = Eigen::Matrix<float, 6, num_rollouts>::Random();\n\n  std::vector<std::array<float, 6>> input_arr(num_rollouts);\n  std::vector<std::array<float, 4>> output_arr(num_rollouts);\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < input_arr[0].size(); dim++)\n      {\n        
input_arr[state_index][dim] = inputs.col(state_index)(dim);\n      }\n    }\n\n    launchForwardTestKernel<FNNHelper<>, 6, 4, 32>(model, input_arr, output_arr, y_dim);\n    for (int point = 0; point < num_rollouts; point++)\n    {\n      Eigen::MatrixXf input = inputs.col(point);\n      Eigen::MatrixXf output = model.getZeroOutputVector();\n\n      model.forward(input, output);\n      for (int dim = 0; dim < 6; dim++)\n      {\n        EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n      }\n      for (int dim = 0; dim < 4; dim++)\n      {\n        EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(output_arr[point][dim]));\n      }\n    }\n  }\n}\n\nTEST_F(FNNHelperTest, forwardGPUCompareNoShared)\n{\n  const int num_rollouts = 10;\n\n  std::vector<int> layers{ 6, 32, 32, 4 };\n  FNNHelper<false> model(layers);\n  model.GPUSetup();\n\n  std::vector<float> theta(1412);\n  for (int i = 0; i < 1412; i++)\n  {\n    theta[i] = distribution(generator);\n  }\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  Eigen::Matrix<float, 6, num_rollouts> inputs;\n  inputs = Eigen::Matrix<float, 6, num_rollouts>::Random();\n\n  std::vector<std::array<float, 6>> input_arr(num_rollouts);\n  std::vector<std::array<float, 4>> output_arr(num_rollouts);\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < input_arr[0].size(); dim++)\n      {\n        input_arr[state_index][dim] = inputs.col(state_index)(dim);\n      }\n    }\n\n    launchForwardTestKernel<FNNHelper<false>, 6, 4, 32>(model, input_arr, output_arr, y_dim);\n    for (int point = 0; point < num_rollouts; point++)\n    {\n      Eigen::MatrixXf input = inputs.col(point);\n      Eigen::MatrixXf output = model.getZeroOutputVector();\n\n      model.forward(input, 
output);\n      for (int dim = 0; dim < 6; dim++)\n      {\n        EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4)\n            << \"at index \" << point << \" dim \" << dim << \" with y_dim \" << y_dim;\n      }\n      for (int dim = 0; dim < 4; dim++)\n      {\n        EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4)\n            << \"at index \" << point << \" dim \" << dim << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(output_arr[point][dim]));\n      }\n    }\n  }\n}\n\nTEST_F(FNNHelperTest, forwardGPUComparePreload)\n{\n  const int num_rollouts = 1000;\n\n  std::vector<int> layers{ 6, 32, 32, 4 };\n  FNNHelper<> model(layers);\n  model.GPUSetup();\n\n  std::vector<float> theta(1412);\n  for (int i = 0; i < 1412; i++)\n  {\n    theta[i] = distribution(generator);\n  }\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  Eigen::Matrix<float, 6, num_rollouts> inputs;\n  inputs = Eigen::Matrix<float, 6, num_rollouts>::Random();\n\n  std::vector<std::array<float, 6>> input_arr(num_rollouts);\n  std::vector<std::array<float, 4>> output_arr(num_rollouts);\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < input_arr[0].size(); dim++)\n      {\n        input_arr[state_index][dim] = inputs.col(state_index)(dim);\n      }\n    }\n\n    launchForwardTestKernelPreload<FNNHelper<>, 6, 4, 32>(model, input_arr, output_arr, y_dim);\n    for (int point = 0; point < num_rollouts; point++)\n    {\n      Eigen::MatrixXf input = inputs.col(point);\n      Eigen::MatrixXf output = model.getZeroOutputVector();\n\n      model.forward(input, output);\n      for (int dim = 0; dim < 6; dim++)\n      {\n        EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n      }\n      for (int dim = 0; dim < 4; dim++)\n      {\n        EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4) << \"at index \" 
<< point << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(output_arr[point][dim]));\n      }\n    }\n  }\n}\n\nTEST_F(FNNHelperTest, forwardGPUComparePreloadNoShared)\n{\n  const int num_rollouts = 1000;\n\n  std::vector<int> layers{ 6, 32, 32, 4 };\n  FNNHelper<false> model(layers);\n  model.GPUSetup();\n\n  std::vector<float> theta(1412);\n  for (int i = 0; i < 1412; i++)\n  {\n    theta[i] = distribution(generator);\n  }\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  Eigen::Matrix<float, 6, num_rollouts> inputs;\n  inputs = Eigen::Matrix<float, 6, num_rollouts>::Random();\n\n  std::vector<std::array<float, 6>> input_arr(num_rollouts);\n  std::vector<std::array<float, 4>> output_arr(num_rollouts);\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < input_arr[0].size(); dim++)\n      {\n        input_arr[state_index][dim] = inputs.col(state_index)(dim);\n      }\n    }\n\n    launchForwardTestKernelPreload<FNNHelper<false>, 6, 4, 32>(model, input_arr, output_arr, y_dim);\n    for (int point = 0; point < num_rollouts; point++)\n    {\n      Eigen::MatrixXf input = inputs.col(point);\n      Eigen::MatrixXf output = model.getZeroOutputVector();\n\n      model.forward(input, output);\n      for (int dim = 0; dim < 6; dim++)\n      {\n        EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n      }\n      for (int dim = 0; dim < 4; dim++)\n      {\n        EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        EXPECT_TRUE(isfinite(output_arr[point][dim]));\n      }\n    }\n  }\n}\n\nTEST_F(FNNHelperTest, TestComputeGradComputationFinite)\n{\n  std::vector<int> layers{ 6, 32, 32, 4 };\n  FNNHelper<> model(layers);\n  std::vector<float> theta(1412);\n  for (int i = 0; i < 1412; i++)\n  {\n    theta[i] = 
distribution(generator);\n  }\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  Eigen::MatrixXf numeric_jac = model.getZeroJacobianMatrix();\n  Eigen::MatrixXf analytic_jac = model.getZeroJacobianMatrix();\n\n  for (int i = 0; i < 1000; i++)\n  {\n    Eigen::MatrixXf input = model.getZeroInputVector();\n    input.Random();\n\n    model.computeGrad(input, analytic_jac);\n    EXPECT_TRUE(analytic_jac.allFinite());\n  }\n}\n\nTEST_F(FNNHelperTest, TestComputeGradComputationCompare)\n{\n  GTEST_SKIP();\n  std::vector<int> layers{ 6, 32, 32, 4 };\n  FNNHelper<> model(layers);\n  std::vector<float> theta(1412);\n  for (int i = 0; i < 1412; i++)\n  {\n    theta[i] = distribution(generator);\n  }\n  model.updateModel({ 6, 32, 32, 4 }, theta);\n\n  Eigen::MatrixXf numeric_jac;\n  Eigen::MatrixXf analytic_jac;\n\n  Eigen::MatrixXf input;\n  input << 1, 2, 3, 4, 5, 6;\n\n  model.computeGrad(input, analytic_jac);\n\n  // numeric_jac = num_diff.df(input, numeric_jac);\n\n  ASSERT_LT((numeric_jac - analytic_jac).norm(), 1e-3) << \"Numeric Jacobian\\n\"\n                                                       << numeric_jac << \"\\nAnalytic Jacobian\\n\"\n                                                       << analytic_jac;\n}\n"
  },
  {
    "path": "tests/nn_helpers/lstm_helper_test.cu",
    "content": "#include <gtest/gtest.h>\n\n#include <mppi/utils/test_helper.h>\n#include <random>\n#include <algorithm>\n#include <math.h>\n\n#include <mppi/utils/nn_helpers/lstm_helper.cuh>\n// Auto-generated header file\n#include <test_networks.h>\n#include <kernel_tests/utils/network_helper_kernel_test.cuh>\n#include <mppi/utils/math_utils.h>\n#include <unsupported/Eigen/NumericalDiff>\n#include \"mppi/ddp/ddp_dynamics.h\"\n\nclass LSTMHelperTest : public testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    generator = std::default_random_engine(7.0);\n    distribution = std::normal_distribution<float>(0.0, 1.0);\n  }\n\n  void TearDown() override\n  {\n  }\n\n  std::default_random_engine generator;\n  std::normal_distribution<float> distribution;\n};\n\nTEST_F(LSTMHelperTest, ParamsConstructor1)\n{\n  std::vector<int> vec = { 30, 10, 3 };\n  LSTMHelper<> model(5, 25, vec);\n\n  EXPECT_EQ(model.getLSTMGrdSharedSizeBytes(), 12400);\n  EXPECT_EQ(model.getGrdSharedSizeBytes(), 13808);\n  EXPECT_EQ(model.getBlkSharedSizeBytes(), (25 * 2 + 30 * 2) * sizeof(float) + 8);\n\n  EXPECT_EQ(model.getHiddenDim(), 25);\n  EXPECT_EQ(model.getInputDim(), 5);\n  EXPECT_EQ(model.getOutputDim(), 3);\n  EXPECT_EQ(model.getHiddenHiddenSize(), 25 * 25);\n  EXPECT_EQ(model.getInputHiddenSize(), 5 * 25);\n\n  float* W_im = model.getWeights();\n  float* W_fm = model.getWeights() + model.getHiddenHiddenSize();\n  float* W_om = model.getWeights() + 2 * model.getHiddenHiddenSize();\n  float* W_cm = model.getWeights() + 3 * model.getHiddenHiddenSize();\n  for (int i = 0; i < 25 * 25; i++)\n  {\n    EXPECT_FLOAT_EQ(W_im[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_fm[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_cm[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_om[i], 0.0f);\n  }\n\n  float* W_ii = model.getWeights() + 4 * model.getHiddenHiddenSize();\n  float* W_fi = model.getWeights() + 4 * model.getHiddenHiddenSize() + model.getInputHiddenSize();\n  float* W_oi = model.getWeights() + 4 * 
model.getHiddenHiddenSize() + 2 * model.getInputHiddenSize();\n  float* W_ci = model.getWeights() + 4 * model.getHiddenHiddenSize() + 3 * model.getInputHiddenSize();\n  for (int i = 0; i < 5 * 25; i++)\n  {\n    EXPECT_FLOAT_EQ(W_ii[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_fi[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_oi[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_ci[i], 0.0f);\n  }\n\n  float* b_i = model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize();\n  float* b_f =\n      model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize() + model.getHiddenDim();\n  float* b_o =\n      model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize() + 2 * model.getHiddenDim();\n  float* b_c =\n      model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize() + 3 * model.getHiddenDim();\n  for (int i = 0; i < 25; i++)\n  {\n    EXPECT_FLOAT_EQ(b_i[i], 0.0f);\n    EXPECT_FLOAT_EQ(b_f[i], 0.0f);\n    EXPECT_FLOAT_EQ(b_o[i], 0.0f);\n    EXPECT_FLOAT_EQ(b_c[i], 0.0f);\n  }\n\n  float* init_hidden =\n      model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize() + 4 * model.getHiddenDim();\n  float* init_cell =\n      model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize() + 5 * model.getHiddenDim();\n  for (int i = 0; i < 25; i++)\n  {\n    EXPECT_FLOAT_EQ(init_hidden[i], 0.0f);\n    EXPECT_FLOAT_EQ(init_cell[i], 0.0f);\n  }\n}\n\nTEST_F(LSTMHelperTest, ConfigConstructor)\n{\n  LSTMConfig config;\n  config.input_dim = 5;\n  config.hidden_dim = 25;\n  config.output_layers = { 30, 10, 3 };\n  LSTMHelper<> model(config);\n\n  EXPECT_EQ(model.getLSTMGrdSharedSizeBytes(), 12400);\n  EXPECT_EQ(model.getGrdSharedSizeBytes(), 13808);\n  EXPECT_EQ(model.getBlkSharedSizeBytes(), (25 * 2 + 30 * 2) * sizeof(float) + 8);\n\n  EXPECT_EQ(model.getHiddenDim(), 25);\n  EXPECT_EQ(model.getInputDim(), 5);\n  EXPECT_EQ(model.getOutputDim(), 3);\n  
EXPECT_EQ(model.getHiddenHiddenSize(), 25 * 25);\n  EXPECT_EQ(model.getInputHiddenSize(), 5 * 25);\n\n  float* W_im = model.getWeights();\n  float* W_fm = model.getWeights() + model.getHiddenHiddenSize();\n  float* W_om = model.getWeights() + 2 * model.getHiddenHiddenSize();\n  float* W_cm = model.getWeights() + 3 * model.getHiddenHiddenSize();\n  for (int i = 0; i < 25 * 25; i++)\n  {\n    EXPECT_FLOAT_EQ(W_im[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_fm[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_cm[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_om[i], 0.0f);\n  }\n\n  float* W_ii = model.getWeights() + 4 * model.getHiddenHiddenSize();\n  float* W_fi = model.getWeights() + 4 * model.getHiddenHiddenSize() + model.getInputHiddenSize();\n  float* W_oi = model.getWeights() + 4 * model.getHiddenHiddenSize() + 2 * model.getInputHiddenSize();\n  float* W_ci = model.getWeights() + 4 * model.getHiddenHiddenSize() + 3 * model.getInputHiddenSize();\n  for (int i = 0; i < 5 * 25; i++)\n  {\n    EXPECT_FLOAT_EQ(W_ii[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_fi[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_oi[i], 0.0f);\n    EXPECT_FLOAT_EQ(W_ci[i], 0.0f);\n  }\n\n  float* b_i = model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize();\n  float* b_f =\n      model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize() + model.getHiddenDim();\n  float* b_o =\n      model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize() + 2 * model.getHiddenDim();\n  float* b_c =\n      model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize() + 3 * model.getHiddenDim();\n  for (int i = 0; i < 25; i++)\n  {\n    EXPECT_FLOAT_EQ(b_i[i], 0.0f);\n    EXPECT_FLOAT_EQ(b_f[i], 0.0f);\n    EXPECT_FLOAT_EQ(b_o[i], 0.0f);\n    EXPECT_FLOAT_EQ(b_c[i], 0.0f);\n  }\n\n  float* init_hidden =\n      model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize() + 4 * model.getHiddenDim();\n  float* init_cell =\n      
model.getWeights() + 4 * model.getHiddenHiddenSize() + 4 * model.getInputHiddenSize() + 5 * model.getHiddenDim();\n  for (int i = 0; i < 25; i++)\n  {\n    EXPECT_FLOAT_EQ(init_hidden[i], 0.0f);\n    EXPECT_FLOAT_EQ(init_cell[i], 0.0f);\n  }\n}\n\nTEST_F(LSTMHelperTest, ParamsConstructor2)\n{\n  int total_amount = 0;\n\n  // delay model\n  std::vector<int> delay_vec = { 2, 10, 1 };\n  LSTMHelper<> delay(1, 1, delay_vec);\n  total_amount += delay.getGrdSharedSizeBytes() / sizeof(float) + 1;\n  total_amount += delay.getBlkSharedSizeBytes() * 32;\n  // terra model\n  std::vector<int> terra_vec = { 18, 10, 3 };\n  LSTMHelper<> terra(8, 10, terra_vec);\n  total_amount += terra.getGrdSharedSizeBytes() / sizeof(float) + 1;\n  total_amount += terra.getBlkSharedSizeBytes() * 32;\n  // engine model\n  std::vector<int> engine_vec = { 9, 10, 1 };\n  LSTMHelper<> engine(4, 5, engine_vec);\n  total_amount += engine.getGrdSharedSizeBytes() / sizeof(float) + 1;\n  total_amount += engine.getBlkSharedSizeBytes() * 32;\n  // steering model\n  std::vector<int> steering_vec = { 12, 20, 1 };\n  LSTMHelper<> steering(7, 5, steering_vec);\n  total_amount += steering.getGrdSharedSizeBytes() / sizeof(float) + 1;\n  total_amount += steering.getBlkSharedSizeBytes() * 32;\n\n  std::cout << \"total amount: \" << total_amount << std::endl;\n  EXPECT_LT(total_amount, 49152);\n}\n\nTEST_F(LSTMHelperTest, BindStream)\n{\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  std::vector<int> vec = { 30, 10, 3 };\n  LSTMHelper<> helper(5, 25, vec, stream);\n\n  EXPECT_EQ(helper.stream_, stream);\n}\n\n// using LSTM = LSTMHelper<LSTMParams<8, 20>, FNNParams<28, 3>>;\nTEST_F(LSTMHelperTest, GPUSetupAndParamsCheck)\n{\n  std::vector<int> vec = { 30, 3 };\n  LSTMHelper<> model(5, 25, vec);\n\n  std::vector<float> theta_vec(93);\n  for (int i = 0; i < 93; i++)\n  {\n    // theta_vec[i] = distribution(generator);\n    theta_vec[i] = static_cast<float>(i);\n  }\n  model.updateOutputModel({ 
30, 3 }, theta_vec);\n\n  int grid_dim = 5;\n\n  std::vector<float> lstm_params(grid_dim);\n  std::vector<float> shared_lstm_params(grid_dim);\n  std::vector<float> fnn_params(grid_dim);\n  std::vector<float> shared_fnn_params(grid_dim);\n\n  EXPECT_EQ(model.GPUMemStatus_, false);\n  EXPECT_EQ(model.network_d_, nullptr);\n  EXPECT_NE(model.getOutputModel(), nullptr);\n\n  model.GPUSetup();\n\n  EXPECT_EQ(model.GPUMemStatus_, true);\n  EXPECT_NE(model.network_d_, nullptr);\n\n  // launch kernel\n  launchParameterCheckTestKernel<LSTMHelper<>>(model, lstm_params, shared_lstm_params, fnn_params, shared_fnn_params,\n                                               grid_dim);\n\n  EXPECT_EQ(model.getOutputGrdSharedSizeBytes(), 400);\n\n  for (int grid = 0; grid < grid_dim; grid++)\n  {\n    // ensure that the output nn matches\n    for (int i = 0; i < 93; i++)\n    {\n      EXPECT_FLOAT_EQ(fnn_params[grid * 100 + i], theta_vec[i]) << \"at grid \" << grid << \" at index \" << i;\n    }\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 93], 0);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 94], 90);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 95], 30);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 96], 3);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 97], 0);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 98], 0);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 99], 0);\n\n    const int hh = 25 * 25;\n    const int ih = 5 * 25;\n    int shift = (25 * 6 + hh * 4 + ih * 4) * grid;\n\n    for (int i = 0; i < 25 * 25; i++)\n    {\n      EXPECT_FLOAT_EQ(lstm_params[shift + i], 0.0f);\n      EXPECT_FLOAT_EQ(lstm_params[shift + hh + i], 0.0f);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 2 * hh + i], 0.0f);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 3 * hh + i], 0.0f);\n    }\n\n    for (int i = 0; i < 8 * 25; i++)\n    {\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + i], 0.0f);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh 
+ ih + i], 0.0f);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 2 * ih + i], 0.0f);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 3 * ih + i], 0.0f);\n    }\n\n    for (int i = 0; i < 25; i++)\n    {\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + i], 0.0f);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 25 + i], 0.0f);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 50 + i], 0.0f);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 75 + i], 0.0f);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 100 + i], 0.0f) << \"at index \" << i;\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 125 + i], 0.0f) << \"at index \" << i;\n    }\n  }\n\n  for (int grid = 0; grid < grid_dim; grid++)\n  {\n    // ensure that the output nn matches\n    for (int i = 0; i < 93; i++)\n    {\n      EXPECT_FLOAT_EQ(shared_fnn_params[grid * 100 + i], theta_vec[i]) << \"at grid \" << grid << \" at index \" << i;\n    }\n    // EXPECT_EQ(static_cast<int>(fnn_params[grid * 97 + 93]), 0);\n    // EXPECT_EQ(static_cast<int>(fnn_params[grid * 97 + 94]), 90);\n    // EXPECT_EQ(static_cast<int>(fnn_params[grid * 97 + 95]), 30);\n    // EXPECT_EQ(static_cast<int>(fnn_params[grid * 97 + 96]), 3);\n\n    const int hh = 25 * 25;\n    const int ih = 5 * 25;\n    int shift = (20 * 6 + hh * 4 + ih * 4) * grid;\n\n    for (int i = 0; i < 25 * 25; i++)\n    {\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + i], 0.0f);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + hh + i], 0.0f);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 2 * hh + i], 0.0f);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 3 * hh + i], 0.0f);\n    }\n\n    for (int i = 0; i < 8 * 25; i++)\n    {\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + i], 0.0f);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + ih + i], 0.0f);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 2 * ih + i], 0.0f);\n      
EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 3 * ih + i], 0.0f);\n    }\n\n    for (int i = 0; i < 25; i++)\n    {\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + i], 0.0f);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 25 + i], 0.0f);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 50 + i], 0.0f);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 75 + i], 0.0f);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 100 + i], 0.0f) << \"at index \" << i;\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 125 + i], 0.0f) << \"at index \" << i;\n    }\n  }\n}\n\nTEST_F(LSTMHelperTest, UpdateModel)\n{\n  std::vector<int> vec = { 30, 3 };\n  LSTMHelper<> model(5, 25, vec);\n\n  int grid_dim = 1;\n\n  std::vector<float> theta_vec(93);\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = static_cast<float>(i);\n  }\n  model.updateOutputModel({ 30, 3 }, theta_vec);\n\n  float* weights_d = model.getWeights();\n  for (int i = 0; i < model.getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * model.getHiddenDim(); i++)\n  {\n    weights_d[i] = static_cast<float>(i);\n  }\n\n  std::vector<float> lstm_params(grid_dim);\n  std::vector<float> shared_lstm_params(grid_dim);\n  std::vector<float> fnn_params(grid_dim);\n  std::vector<float> shared_fnn_params(grid_dim);\n\n  EXPECT_EQ(model.GPUMemStatus_, false);\n  EXPECT_EQ(model.network_d_, nullptr);\n  EXPECT_NE(model.getOutputModel(), nullptr);\n\n  model.GPUSetup();\n\n  EXPECT_EQ(model.GPUMemStatus_, true);\n  EXPECT_NE(model.network_d_, nullptr);\n\n  // launch kernel\n  launchParameterCheckTestKernel<LSTMHelper<>>(model, lstm_params, shared_lstm_params, fnn_params, shared_fnn_params,\n                                               grid_dim);\n\n  EXPECT_EQ(model.getOutputGrdSharedSizeBytes(), 400);\n  for (int grid = 0; grid < grid_dim; grid++)\n  {\n    // ensure that the output nn matches\n    
for (int i = 0; i < theta_vec.size(); i++)\n    {\n      EXPECT_FLOAT_EQ(fnn_params[grid * 100 + i], theta_vec[i]) << \"at grid \" << grid << \" at index \" << i;\n    }\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 93], 0);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 94], 90);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 95], 30);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 96], 3);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 97], 0);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 98], 0);\n    EXPECT_EQ(((int*)fnn_params.data())[grid * 100 + 99], 0);\n\n    const int hh = 25 * 25;\n    const int ih = 5 * 25;\n    int shift = (25 * 6 + hh * 4 + ih * 4) * grid;\n\n    for (int i = 0; i < 25 * 25; i++)\n    {\n      EXPECT_FLOAT_EQ(lstm_params[shift + i], weights_d[i]) << \"at grid \" << grid << \" at index \" << i;\n      EXPECT_FLOAT_EQ(lstm_params[shift + hh + i], weights_d[hh + i]) << \"at grid \" << grid << \" at index \" << i;\n      EXPECT_FLOAT_EQ(lstm_params[shift + 2 * hh + i], weights_d[2 * hh + i])\n          << \"at grid \" << grid << \" at index \" << i;\n      EXPECT_FLOAT_EQ(lstm_params[shift + 3 * hh + i], weights_d[3 * hh + i])\n          << \"at grid \" << grid << \" at index \" << i;\n    }\n\n    for (int i = 0; i < 8 * 25; i++)\n    {\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + i], weights_d[4 * hh + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + ih + i], weights_d[4 * hh + ih + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 2 * ih + i], weights_d[4 * hh + 2 * ih + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 3 * ih + i], weights_d[4 * hh + 3 * ih + i]);\n    }\n\n    for (int i = 0; i < 25; i++)\n    {\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + i], weights_d[4 * hh + 4 * ih + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 25 + i], weights_d[4 * hh + 4 * ih + 25 + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * 
hh + 4 * ih + 50 + i], weights_d[4 * hh + 4 * ih + 50 + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 75 + i], weights_d[4 * hh + 4 * ih + 75 + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 100 + i], weights_d[4 * hh + 4 * ih + 100 + i])\n          << \"at index \" << i;\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 125 + i], weights_d[4 * hh + 4 * ih + 125 + i])\n          << \"at index \" << i;\n    }\n  }\n\n  for (int grid = 0; grid < grid_dim; grid++)\n  {\n    // ensure that the output nn matches\n    for (int i = 0; i < 93; i++)\n    {\n      EXPECT_FLOAT_EQ(shared_fnn_params[grid * 97 + i], theta_vec[i]) << \"at grid \" << grid << \" at index \" << i;\n    }\n    // EXPECT_EQ((int)fnn_params[grid * 97 + 93], 0);\n    // EXPECT_EQ((int)fnn_params[grid * 97 + 94], 90);\n    // EXPECT_EQ((int)fnn_params[grid * 97 + 95], 30);\n    // EXPECT_EQ((int)fnn_params[grid * 97 + 96], 3);\n\n    const int hh = 25 * 25;\n    const int ih = 5 * 25;\n    int shift = (25 * 6 + hh * 4 + ih * 4) * grid;\n\n    for (int i = 0; i < 25 * 25; i++)\n    {\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + i], weights_d[i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + hh + i], weights_d[hh + i]) << \"at grid \" << grid << \" at index \" << i;\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 2 * hh + i], weights_d[2 * hh + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 3 * hh + i], weights_d[3 * hh + i]);\n    }\n\n    for (int i = 0; i < 8 * 25; i++)\n    {\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + i], weights_d[4 * hh + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + ih + i], weights_d[4 * hh + ih + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 2 * ih + i], weights_d[4 * hh + 2 * ih + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 3 * ih + i], weights_d[4 * hh + 3 * ih + i]);\n    }\n\n    for (int i = 0; i < 25; i++)\n    {\n      
EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + i], weights_d[4 * hh + 4 * ih + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 25 + i], weights_d[4 * hh + 4 * ih + 25 + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 50 + i], weights_d[4 * hh + 4 * ih + 50 + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 75 + i], weights_d[4 * hh + 4 * ih + 75 + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 100 + i], weights_d[4 * hh + 4 * ih + 100 + i])\n          << \"at index \" << i;\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 125 + i], weights_d[4 * hh + 4 * ih + 125 + i])\n          << \"at index \" << i;\n    }\n  }\n}\n\nTEST_F(LSTMHelperTest, LoadModelPathTest)\n{\n  std::vector<int> vec = { 2, 1 };\n  LSTMHelper<> model(1, 1, vec);\n  model.GPUSetup();\n\n  std::string path = mppi::tests::test_lstm_lstm;\n  if (!fileExists(path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << path.c_str();\n    exit(-1);\n  }\n  model.loadParams(path);\n\n  cnpy::npz_t input_outputs = cnpy::npz_load(path);\n  int num_points = input_outputs.at(\"num_points\").data<int>()[0];\n  std::cout << \"got T \" << input_outputs.at(\"T\").data<double>()[0] << \" dt \" << input_outputs.at(\"dt\").data<double>()[0]\n            << std::endl;\n  std::cout << \"num points \" << num_points << std::endl;\n  int T = std::round(input_outputs.at(\"T\").data<double>()[0] / input_outputs.at(\"dt\").data<double>()[0]);\n  EXPECT_EQ(num_points, 3);\n  EXPECT_EQ(T, 5);\n\n  double* inputs = input_outputs.at(\"input\").data<double>();\n  double* outputs = input_outputs.at(\"output\").data<double>();\n  double* init_hidden = input_outputs.at(\"init/hidden\").data<double>();\n  double* init_cell = input_outputs.at(\"init/cell\").data<double>();\n  double* hidden = input_outputs.at(\"hidden\").data<double>();\n  double* cell = 
input_outputs.at(\"cell\").data<double>();\n\n  double tol = 1e-5;\n\n  auto input = model.getZeroInputVector();\n  auto output = model.getZeroOutputVector();\n\n  const int hidden_dim = model.getHiddenDim();\n  const int input_dim = model.getInputDim();\n  const int output_dim = model.getOutputDim();\n\n  for (int point = 0; point < num_points; point++)\n  {\n    Eigen::VectorXf initial_hidden = Eigen::VectorXf(hidden_dim, 1);\n    Eigen::VectorXf initial_cell = Eigen::VectorXf(hidden_dim, 1);\n    for (int i = 0; i < hidden_dim; i++)\n    {\n      initial_hidden(i) = init_hidden[hidden_dim * point + i];\n      initial_cell(i) = init_cell[hidden_dim * point + i];\n    }\n    model.updateLSTMInitialStates(initial_hidden, initial_cell);\n    for (int t = 0; t < T; t++)\n    {\n      for (int i = 0; i < input_dim; i++)\n      {\n        input(i) = inputs[point * T * input_dim + t * input_dim + i];\n      }\n      model.forward(input, output);\n\n      for (int i = 0; i < output_dim; i++)\n      {\n        EXPECT_NEAR(output(i), outputs[point * T * output_dim + t * output_dim + i], tol)\n            << \"t: \" << t << \" point \" << point << \" at dim \" << i;\n      }\n      for (int i = 0; i < hidden_dim; i++)\n      {\n        EXPECT_NEAR(model.getHiddenState()(i), hidden[point * T * hidden_dim + hidden_dim * t + i], tol)\n            << \"t: \" << t << \" point \" << point << \" at dim \" << i;\n        EXPECT_NEAR(model.getCellState()(i), cell[point * T * hidden_dim + hidden_dim * t + i], tol)\n            << \"t: \" << t << \" point \" << point << \" at dim \" << i;\n      }\n    }\n  }\n}\n\nTEST_F(LSTMHelperTest, LoadModelPathInitTest)\n{\n  std::vector<int> vec = { 2, 1 };\n  LSTMHelper<> model(1, 1, vec);\n  model.GPUSetup();\n\n  std::string path = mppi::tests::test_lstm_lstm;\n  if (!fileExists(path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << path.c_str();\n    exit(-1);\n  }\n  cnpy::npz_t param_dict = cnpy::npz_load(path);\n 
 model.loadParams(\"init_\", param_dict, false);\n\n  cnpy::npz_t input_outputs = cnpy::npz_load(path);\n  int num_points = input_outputs.at(\"num_points\").data<int>()[0];\n  int T = std::round(input_outputs.at(\"tau\").data<double>()[0] / input_outputs.at(\"dt\").data<double>()[0]) + 1;\n  double* init_inputs = input_outputs.at(\"init_input\").data<double>();\n  double* init_hidden = input_outputs.at(\"init/hidden\").data<double>();\n  double* init_cell = input_outputs.at(\"init/cell\").data<double>();\n  // TODO not the right config to load this data essentially\n  // double* init_step_hidden = input_outputs.at(\"init_step_hidden\").data<double>();\n  // double* init_step_cell = input_outputs.at(\"init_step_cell\").data<double>();\n  EXPECT_EQ(num_points, 3);\n  EXPECT_EQ(T, 6);\n\n  double tol = 1e-5;\n\n  auto input = model.getZeroInputVector();\n  auto output = model.getZeroOutputVector();\n\n  const int hidden_dim = model.getHiddenDim();\n  const int input_dim = model.getInputDim();\n  const int output_dim = model.getOutputDim();\n\n  EXPECT_EQ(hidden_dim, 60);\n  EXPECT_EQ(input_dim, 3);\n  EXPECT_EQ(output_dim, 50);\n\n  for (int point = 0; point < num_points; point++)\n  {\n    // run the init network and ensure initial hidden/cell match\n    model.resetHiddenCellCPU();\n    for (int t = 0; t < T; t++)\n    {\n      for (int i = 0; i < input_dim; i++)\n      {\n        input(i) = init_inputs[point * T * input_dim + t * input_dim + i];\n      }\n\n      model.forward(input, output);\n      // for (int i = 0; i < 60; i++)\n      // {\n      //   EXPECT_NEAR(model.getHiddenState()(i), init_step_hidden[60 * point * T + t * 60 + i], tol)\n      //       << \"at t \" << t << \" dim \" << i;\n      //   EXPECT_NEAR(model.getCellState()(i), init_step_cell[60 * point * T + t * 60 + i], tol)\n      //       << \"at t \" << t << \" dim \" << i;\n      // }\n    }\n    for (int i = 0; i < 25; i++)\n    {\n      EXPECT_NEAR(output(i), init_hidden[25 * point + i], 
tol)\n          << \"incorrect hidden at point \" << point << \" index \" << i;\n      EXPECT_NEAR(output(i + 25), init_cell[25 * point + i], tol)\n          << \"incorrect cell at point \" << point << \" index \" << i;\n    }\n  }\n}\n\nTEST_F(LSTMHelperTest, LoadModelNPZTest)\n{\n  std::vector<int> vec = { 2, 1 };\n  LSTMHelper<> model(1, 1, vec);\n  model.GPUSetup();\n\n  std::string path = mppi::tests::test_lstm_lstm;\n  if (!fileExists(path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << path.c_str();\n    exit(-1);\n  }\n  cnpy::npz_t param_dict = cnpy::npz_load(path);\n  model.loadParams(param_dict);\n\n  cnpy::npz_t input_outputs = cnpy::npz_load(path);\n  int num_points = input_outputs.at(\"num_points\").data<int>()[0];\n  int T = std::round(input_outputs.at(\"T\").data<float>()[0] / input_outputs.at(\"dt\").data<float>()[0]);\n  double* inputs = input_outputs.at(\"input\").data<double>();\n  double* outputs = input_outputs.at(\"output\").data<double>();\n  double* init_hidden = input_outputs.at(\"init/hidden\").data<double>();\n  double* init_cell = input_outputs.at(\"init/cell\").data<double>();\n  double* hidden = input_outputs.at(\"hidden\").data<double>();\n  double* cell = input_outputs.at(\"cell\").data<double>();\n\n  double tol = 1e-5;\n\n  auto input = model.getZeroInputVector();\n  auto output = model.getZeroOutputVector();\n\n  const int hidden_dim = model.getHiddenDim();\n  const int input_dim = model.getInputDim();\n  const int output_dim = model.getOutputDim();\n\n  for (int point = 0; point < num_points; point++)\n  {\n    Eigen::VectorXf initial_hidden = Eigen::VectorXf(hidden_dim, 1);\n    Eigen::VectorXf initial_cell = Eigen::VectorXf(hidden_dim, 1);\n    for (int i = 0; i < hidden_dim; i++)\n    {\n      initial_hidden(i) = init_hidden[hidden_dim * point + i];\n      initial_cell(i) = init_cell[hidden_dim * point + i];\n    }\n    model.updateLSTMInitialStates(initial_hidden, initial_cell);\n    for (int t = 0; 
t < T; t++)\n    {\n      for (int i = 0; i < 3; i++)\n      {\n        input(i) = inputs[point * T * 3 + t * 3 + i];\n      }\n      model.forward(input, output);\n\n      for (int i = 0; i < 2; i++)\n      {\n        EXPECT_NEAR(output[i], outputs[point * T * 2 + t * 2 + i], tol) << \"point \" << point << \" at dim \" << i;\n      }\n      for (int i = 0; i < 25; i++)\n      {\n        EXPECT_NEAR(model.getHiddenState()(i), hidden[point * T * 25 + 25 * t + i], tol)\n            << \"point \" << point << \" at dim \" << i;\n        EXPECT_NEAR(model.getCellState()(i), cell[point * T * 25 + 25 * t + i], tol)\n            << \"point \" << point << \" at dim \" << i;\n      }\n    }\n  }\n}\n\nTEST_F(LSTMHelperTest, forwardCPU)\n{\n  std::vector<int> vec = { 28, 3 };\n  LSTMHelper<> model(8, 20, vec);\n\n  std::vector<float> theta_vec(87);\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = 1.0;\n  }\n  model.updateOutputModel({ 28, 3 }, theta_vec);\n\n  float* weights_d = model.getWeights();\n  for (int i = 0; i < model.getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * model.getHiddenDim(); i++)\n  {\n    weights_d[i] = 1.0f;\n  }\n  model.resetHiddenCellCPU();\n\n  auto input = model.getZeroInputVector();\n  auto output = model.getZeroOutputVector();\n  input.setOnes();\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 28.28055);\n  EXPECT_FLOAT_EQ(output[1], 28.28055);\n  EXPECT_FLOAT_EQ(output[2], 28.28055);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 28.901096);\n  EXPECT_FLOAT_EQ(output[1], 28.901096);\n  EXPECT_FLOAT_EQ(output[2], 28.901096);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 28.986588);\n  EXPECT_FLOAT_EQ(output[1], 28.986588);\n  EXPECT_FLOAT_EQ(output[2], 28.986588);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 28.998184);\n  EXPECT_FLOAT_EQ(output[1], 28.998184);\n  EXPECT_FLOAT_EQ(output[2], 28.998184);\n\n  model.forward(input, output);\n  
EXPECT_FLOAT_EQ(output[0], 28.999756);\n  EXPECT_FLOAT_EQ(output[1], 28.999756);\n  EXPECT_FLOAT_EQ(output[2], 28.999756);\n}\n\nTEST_F(LSTMHelperTest, forwardGPU)\n{\n  const int num_rollouts = 1000;\n\n  std::vector<int> vec = { 28, 3 };\n  LSTMHelper<> model(8, 20, vec);\n\n  std::vector<float> theta(87);\n  std::fill(theta.begin(), theta.end(), 1);\n  model.updateOutputModel({ 28, 3 }, theta);\n\n  float* weights_d = model.getWeights();\n  for (int i = 0; i < model.getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * model.getHiddenDim(); i++)\n  {\n    weights_d[i] = 1.0f;\n  }\n  model.resetHiddenCellCPU();\n\n  model.GPUSetup();\n\n  Eigen::Matrix<float, 8, num_rollouts> inputs;\n  inputs = Eigen::Matrix<float, 8, num_rollouts>::Ones();\n  Eigen::VectorXf output = model.getZeroOutputVector();\n  Eigen::VectorXf input = model.getZeroInputVector();\n\n  std::array<float, 5> true_vals = { 28.28055, 28.901096, 28.986588, 28.998184, 28.999756 };\n\n  std::vector<std::array<float, 8>> input_arr(num_rollouts);\n  std::vector<std::array<float, 3>> output_arr(num_rollouts);\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < input_arr[0].size(); dim++)\n      {\n        input_arr[state_index][dim] = inputs.col(state_index)(dim);\n      }\n    }\n\n    for (int step = 1; step < 6; step++)\n    {\n      launchForwardTestKernel<LSTMHelper<>, 8, 3, 32>(model, input_arr, output_arr, y_dim, step);\n      for (int point = 0; point < num_rollouts; point++)\n      {\n        model.resetHiddenCellCPU();\n        input = inputs.col(point);\n\n        for (int cpu_step = 0; cpu_step < step; cpu_step++)\n        {\n          model.forward(input, output);\n        }\n        for (int dim = 0; dim < 8; dim++)\n        {\n          EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        }\n        for (int dim = 0; 
dim < 3; dim++)\n        {\n          EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n          EXPECT_TRUE(isfinite(output_arr[point][dim]));\n          EXPECT_FLOAT_EQ(output(dim), true_vals[step - 1]) << \"at dim \" << dim << \" step \" << step;\n        }\n      }\n    }\n  }\n}\n\nTEST_F(LSTMHelperTest, forwardGPUCompareNoShared)\n{\n  const int num_rollouts = 1000;\n\n  std::vector<int> vec = { 28, 3 };\n  LSTMHelper<false> model(8, 20, vec);\n  std::vector<float> theta_vec(87);\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = distribution(generator);\n  }\n  model.updateOutputModel({ 28, 3 }, theta_vec);\n\n  float* weights_d = model.getWeights();\n  for (int i = 0; i < model.getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * model.getHiddenDim(); i++)\n  {\n    weights_d[i] = distribution(generator);\n  }\n  model.resetHiddenCellCPU();\n\n  model.GPUSetup();\n\n  Eigen::Matrix<float, 8, num_rollouts> inputs;\n  inputs = Eigen::Matrix<float, 8, num_rollouts>::Random();\n  Eigen::VectorXf output = model.getZeroOutputVector();\n  Eigen::VectorXf input = model.getZeroInputVector();\n\n  std::vector<std::array<float, 8>> input_arr(num_rollouts);\n  std::vector<std::array<float, 3>> output_arr(num_rollouts);\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < input_arr[0].size(); dim++)\n      {\n        input_arr[state_index][dim] = inputs.col(state_index)(dim);\n      }\n    }\n    for (int step = 1; step < 6; step++)\n    {\n      launchForwardTestKernel<LSTMHelper<false>, 8, 3, 32>(model, input_arr, output_arr, y_dim, step);\n      for (int point = 0; point < num_rollouts; point++)\n      {\n        model.resetHiddenCellCPU();\n        input = inputs.col(point);\n\n        for (int cpu_step = 0; cpu_step < step; cpu_step++)\n        {\n          model.forward(input, 
output);\n        }\n        for (int dim = 0; dim < 8; dim++)\n        {\n          EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        }\n        for (int dim = 0; dim < 3; dim++)\n        {\n          EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n          EXPECT_TRUE(isfinite(output_arr[point][dim]));\n        }\n      }\n    }\n  }\n}\n\nTEST_F(LSTMHelperTest, forwardGPUCompareShared)\n{\n  const int num_rollouts = 1000;\n\n  std::vector<int> vec = { 28, 3 };\n  LSTMHelper<> model(8, 20, vec);\n  std::vector<float> theta_vec(87);\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = distribution(generator);\n  }\n  model.updateOutputModel({ 28, 3 }, theta_vec);\n\n  float* weights_d = model.getWeights();\n  for (int i = 0; i < model.getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * model.getHiddenDim(); i++)\n  {\n    weights_d[i] = distribution(generator);\n  }\n  model.resetHiddenCellCPU();\n\n  model.GPUSetup();\n\n  Eigen::Matrix<float, 8, num_rollouts> inputs;\n  inputs = Eigen::Matrix<float, 8, num_rollouts>::Random();\n  Eigen::VectorXf output = model.getZeroOutputVector();\n  Eigen::VectorXf input = model.getZeroInputVector();\n\n  std::vector<std::array<float, 8>> input_arr(num_rollouts);\n  std::vector<std::array<float, 3>> output_arr(num_rollouts);\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < input_arr[0].size(); dim++)\n      {\n        input_arr[state_index][dim] = inputs.col(state_index)(dim);\n      }\n    }\n    for (int step = 1; step < 6; step++)\n    {\n      launchForwardTestKernel<LSTMHelper<>, 8, 3, 32>(model, input_arr, output_arr, y_dim, step);\n      for (int point = 0; point < num_rollouts; point++)\n      {\n        model.resetHiddenCellCPU();\n        input = 
inputs.col(point);\n\n        for (int cpu_step = 0; cpu_step < step; cpu_step++)\n        {\n          model.forward(input, output);\n        }\n        for (int dim = 0; dim < 8; dim++)\n        {\n          EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        }\n        for (int dim = 0; dim < 3; dim++)\n        {\n          EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n          EXPECT_TRUE(isfinite(output_arr[point][dim]));\n        }\n      }\n    }\n  }\n}\n\nTEST_F(LSTMHelperTest, forwardGPUComparePreloadNoShared)\n{\n  const int num_rollouts = 1000;\n\n  std::vector<int> vec = { 28, 3 };\n  LSTMHelper<false> model(8, 20, vec);\n  std::vector<float> theta_vec(87);\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = distribution(generator);\n  }\n  model.updateOutputModel({ 28, 3 }, theta_vec);\n\n  float* weights_d = model.getWeights();\n  for (int i = 0; i < model.getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * model.getHiddenDim(); i++)\n  {\n    weights_d[i] = distribution(generator);\n  }\n  model.resetHiddenCellCPU();\n\n  model.GPUSetup();\n\n  Eigen::Matrix<float, 8, num_rollouts> inputs;\n  inputs = Eigen::Matrix<float, 8, num_rollouts>::Random();\n  Eigen::VectorXf output = model.getZeroOutputVector();\n  Eigen::VectorXf input = model.getZeroInputVector();\n\n  std::vector<std::array<float, 8>> input_arr(num_rollouts);\n  std::vector<std::array<float, 3>> output_arr(num_rollouts);\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < input_arr[0].size(); dim++)\n      {\n        input_arr[state_index][dim] = inputs.col(state_index)(dim);\n      }\n    }\n    for (int step = 1; step < 6; step++)\n    {\n      launchForwardTestKernelPreload<LSTMHelper<false>, 8, 3, 32>(model, input_arr, 
output_arr, y_dim, step);\n      for (int point = 0; point < num_rollouts; point++)\n      {\n        model.resetHiddenCellCPU();\n        input = inputs.col(point);\n\n        for (int cpu_step = 0; cpu_step < step; cpu_step++)\n        {\n          model.forward(input, output);\n        }\n        for (int dim = 0; dim < 8; dim++)\n        {\n          EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        }\n        for (int dim = 0; dim < 3; dim++)\n        {\n          EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n          EXPECT_TRUE(isfinite(output_arr[point][dim]));\n        }\n      }\n    }\n  }\n}\n\nTEST_F(LSTMHelperTest, forwardGPUComparePreloadShared)\n{\n  const int num_rollouts = 1000;\n\n  std::vector<int> vec = { 28, 3 };\n  LSTMHelper<> model(8, 20, vec);\n  std::vector<float> theta_vec(87);\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = distribution(generator);\n  }\n  model.updateOutputModel({ 28, 3 }, theta_vec);\n\n  float* weights_d = model.getWeights();\n  for (int i = 0; i < model.getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * model.getHiddenDim(); i++)\n  {\n    weights_d[i] = distribution(generator);\n  }\n  model.resetHiddenCellCPU();\n\n  model.GPUSetup();\n\n  Eigen::Matrix<float, 8, num_rollouts> inputs;\n  inputs = Eigen::Matrix<float, 8, num_rollouts>::Random();\n  Eigen::VectorXf output = model.getZeroOutputVector();\n  Eigen::VectorXf input = model.getZeroInputVector();\n\n  std::vector<std::array<float, 8>> input_arr(num_rollouts);\n  std::vector<std::array<float, 3>> output_arr(num_rollouts);\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int state_index = 0; state_index < num_rollouts; state_index++)\n    {\n      for (int dim = 0; dim < input_arr[0].size(); dim++)\n      {\n        input_arr[state_index][dim] = inputs.col(state_index)(dim);\n      }\n    }\n 
   for (int step = 1; step < 6; step++)\n    {\n      launchForwardTestKernelPreload<LSTMHelper<>, 8, 3, 32>(model, input_arr, output_arr, y_dim, step);\n      for (int point = 0; point < num_rollouts; point++)\n      {\n        model.resetHiddenCellCPU();\n        input = inputs.col(point);\n\n        for (int cpu_step = 0; cpu_step < step; cpu_step++)\n        {\n          model.forward(input, output);\n        }\n        for (int dim = 0; dim < 8; dim++)\n        {\n          EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        }\n        for (int dim = 0; dim < 3; dim++)\n        {\n          EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n          EXPECT_TRUE(isfinite(output_arr[point][dim]));\n        }\n      }\n    }\n  }\n}\n\nTEST_F(LSTMHelperTest, forwardGPUSpeedTest)\n{\n  const int num_rollouts = 15000;\n\n  std::vector<int> vec = { 28, 3 };\n  LSTMHelper<> shared_model(8, 20, vec);\n  LSTMHelper<false> global_model(8, 20, vec);\n  std::vector<float> theta_vec(87);\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = distribution(generator);\n  }\n  shared_model.updateOutputModel({ 28, 3 }, theta_vec);\n  global_model.updateOutputModel({ 28, 3 }, theta_vec);\n\n  float* shared_weights_d = shared_model.getWeights();\n  float* global_weights_d = global_model.getWeights();\n  for (int i = 0; i < shared_model.getLSTMGrdSharedSizeBytes() / sizeof(float) + 2 * shared_model.getHiddenDim(); i++)\n  {\n    shared_weights_d[i] = distribution(generator);\n    global_weights_d[i] = shared_weights_d[i];\n  }\n  shared_model.resetHiddenCellCPU();\n\n  shared_model.GPUSetup();\n\n  global_model.resetHiddenCellCPU();\n\n  global_model.GPUSetup();\n\n  Eigen::Matrix<float, 8, 1000> inputs;\n  inputs = Eigen::Matrix<float, 8, 1000>::Random();\n  Eigen::VectorXf output = shared_model.getZeroOutputVector();\n  Eigen::VectorXf 
input = shared_model.getZeroInputVector();\n\n  std::vector<std::array<float, 8>> input_arr(num_rollouts);\n  std::vector<std::array<float, 3>> output_arr(num_rollouts);\n\n  for (int state_index = 0; state_index < num_rollouts; state_index++)\n  {\n    for (int dim = 0; dim < input_arr[0].size(); dim++)\n    {\n      input_arr[state_index][dim] = inputs.col(state_index % 1000)(dim);\n    }\n  }\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    // TODO the way of doing this timing is bad\n    auto shared_start = std::chrono::steady_clock::now();\n    launchForwardTestKernel<LSTMHelper<>, 8, 3, 32>(shared_model, input_arr, output_arr, y_dim, 500);\n    auto shared_stop = std::chrono::steady_clock::now();\n\n    auto global_start = std::chrono::steady_clock::now();\n    launchForwardTestKernel<LSTMHelper<false>, 8, 3, 32>(global_model, input_arr, output_arr, y_dim, 500);\n    auto global_stop = std::chrono::steady_clock::now();\n\n    float shared_time_ms = mppi::math::timeDiffms(shared_stop, shared_start);\n    float global_time_ms = mppi::math::timeDiffms(global_stop, global_start);\n    std::cout << \"for y dim \" << y_dim << \" got shared: \" << shared_time_ms << std::endl;\n    std::cout << \"for y dim \" << y_dim << \" got global: \" << global_time_ms << std::endl;\n  }\n}\n\n// TEST_F(LSTMHelperTest, TestComputeGradComputationFinite)\n// {\n//   LSTMHelper<LSTMParams<6, 32, 32, 4>> model;\n//   std::vector<float> theta(1412);\n//   for (int i = 0; i < 1412; i++)\n//   {\n//     theta[i] = distribution(generator);\n//   }\n//   model.updateModel({ 6, 32, 32, 4 }, theta);\n//\n//   LSTMHelper<LSTMParams<6, 32, 32, 4>>::dfdx numeric_jac;\n//   LSTMHelper<LSTMParams<6, 32, 32, 4>>::dfdx analytic_jac;\n//\n//   for (int i = 0; i < 1000; i++)\n//   {\n//     LSTMHelper<LSTMParams<6, 32, 32, 4>>::input_array input;\n//     input = LSTMHelper<LSTMParams<6, 32, 32, 4>>::input_array::Random();\n//\n//     model.computeGrad(input, analytic_jac);\n//     
EXPECT_TRUE(analytic_jac.allFinite());\n//   }\n// }\n//\n// TEST_F(LSTMHelperTest, TestComputeGradComputationCompare)\n// {\n//   GTEST_SKIP();\n//   LSTMHelper<LSTMParams<6, 32, 32, 4>> model;\n//   std::vector<float> theta(1412);\n//   for (int i = 0; i < 1412; i++)\n//   {\n//     theta[i] = distribution(generator);\n//   }\n//   model.updateModel({ 6, 32, 32, 4 }, theta);\n//\n//   LSTMHelper<LSTMParams<6, 32, 32, 4>>::dfdx numeric_jac;\n//   LSTMHelper<LSTMParams<6, 32, 32, 4>>::dfdx analytic_jac;\n//\n//   LSTMHelper<LSTMParams<6, 32, 32, 4>>::input_array input;\n//   input << 1, 2, 3, 4, 5, 6;\n//\n//   model.computeGrad(input, analytic_jac);\n//\n//   // numeric_jac = num_diff.df(input, numeric_jac);\n//\n//   ASSERT_LT((numeric_jac - analytic_jac).norm(), 1e-3) << \"Numeric Jacobian\\n\"\n//                                                        << numeric_jac << \"\\nAnalytic Jacobian\\n\"\n//                                                        << analytic_jac;\n// }\n"
  },
  {
    "path": "tests/nn_helpers/lstm_lstm_helper_test.cu",
    "content": "// #include <gtest/gtest.h>\n\n#include <mppi/utils/test_helper.h>\n#include <random>\n#include <algorithm>\n#include <math.h>\n\n#include <mppi/utils/nn_helpers/lstm_lstm_helper.cuh>\n// Auto-generated header file\n#include <test_networks.h>\n#include <kernel_tests/utils/network_helper_kernel_test.cuh>\n#include \"mppi/ddp/ddp_dynamics.h\"\n\nclass LSTMLSTMHelperTest : public testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    generator = std::default_random_engine(7.0);\n    distribution = std::normal_distribution<float>(0.0, 1.0);\n  }\n\n  void TearDown() override\n  {\n  }\n\n  std::default_random_engine generator;\n  std::normal_distribution<float> distribution;\n  std::vector<int> init_output_layers = { 68, 100, 20 };\n  std::vector<int> output_layers = { 18, 2 };\n  const int init_hidden_dim = 60;\n  const int hidden_dim = 10;\n  const int init_input_dim = 8;\n  const int input_dim = 8;\n  const int output_dim = 2;\n  const int init_len = 6;\n};\n\n// template class FNNParams<18, 3>;\n// template class FNNParams<68, 100, 20>;\n// template class LSTMParams<8, 10>;\n// template class LSTMParams<8, 60>;\n// typedef FNNParams<18, 3> FNN_PARAMS;\n// typedef FNNParams<68, 100, 20> FNN_INIT_PARAMS;\n// typedef LSTMHelper<LSTMParams<8, 10>, FNN_PARAMS> LSTM;\n// typedef LSTMHelper<LSTMParams<8, 60>, FNN_INIT_PARAMS> INIT_LSTM;\n//\n// template class LSTMLSTMHelper<INIT_LSTM, LSTM, 10>;\n//\n// typedef LSTMLSTMHelper<INIT_LSTM, LSTM, 10> T;\n\nTEST_F(LSTMLSTMHelperTest, BindStreamAndConstructor)\n{\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n\n  LSTMLSTMHelper<> helper(init_input_dim, init_hidden_dim, init_output_layers, input_dim, hidden_dim, output_layers,\n                          init_len, stream);\n\n  EXPECT_EQ(helper.getLSTMModel()->stream_, stream);\n  EXPECT_NE(helper.getLSTMModel(), nullptr);\n\n  EXPECT_EQ(helper.getLSTMModel()->getInputDim(), input_dim);\n  
EXPECT_EQ(helper.getLSTMModel()->getOutputDim(), output_dim);\n  EXPECT_EQ(helper.getLSTMModel()->getHiddenDim(), hidden_dim);\n\n  EXPECT_EQ(helper.getInitModel()->getInputDim(), init_input_dim);\n  EXPECT_EQ(helper.getInitModel()->getOutputDim(), 2 * hidden_dim);\n  EXPECT_EQ(helper.getInitModel()->getHiddenDim(), init_hidden_dim);\n\n  auto init_lstm = helper.getInitModel();\n  EXPECT_NE(init_lstm, nullptr);\n\n  auto hidden = init_lstm->getHiddenState();\n  auto cell = init_lstm->getCellState();\n  for (int i = 0; i < init_lstm->getHiddenDim(); i++)\n\n  {\n    EXPECT_FLOAT_EQ(hidden(i), 0.0f);\n    EXPECT_FLOAT_EQ(cell(i), 0.0f);\n  }\n}\n\nTEST_F(LSTMLSTMHelperTest, BindStreamAndConstructorConfigLSTM)\n{\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n  LSTMConfig init_config, pred_config;\n  init_config.input_dim = init_input_dim;\n  init_config.hidden_dim = init_hidden_dim;\n  init_config.output_layers = init_output_layers;\n  pred_config.input_dim = input_dim;\n  pred_config.hidden_dim = hidden_dim;\n  pred_config.output_layers = output_layers;\n\n  LSTMLSTMHelper<> helper(init_config, pred_config, init_len, stream);\n\n  EXPECT_EQ(helper.getLSTMModel()->stream_, stream);\n  EXPECT_NE(helper.getLSTMModel(), nullptr);\n\n  EXPECT_EQ(helper.getLSTMModel()->getInputDim(), input_dim);\n  EXPECT_EQ(helper.getLSTMModel()->getOutputDim(), output_dim);\n  EXPECT_EQ(helper.getLSTMModel()->getHiddenDim(), hidden_dim);\n\n  EXPECT_EQ(helper.getInitModel()->getInputDim(), init_input_dim);\n  EXPECT_EQ(helper.getInitModel()->getOutputDim(), 2 * hidden_dim);\n  EXPECT_EQ(helper.getInitModel()->getHiddenDim(), init_hidden_dim);\n\n  auto init_lstm = helper.getInitModel();\n  EXPECT_NE(init_lstm, nullptr);\n\n  auto hidden = init_lstm->getHiddenState();\n  auto cell = init_lstm->getCellState();\n  for (int i = 0; i < init_lstm->getHiddenDim(); i++)\n\n  {\n    EXPECT_FLOAT_EQ(hidden(i), 0.0f);\n    EXPECT_FLOAT_EQ(cell(i), 0.0f);\n  
}\n}\n\nTEST_F(LSTMLSTMHelperTest, BindStreamAndConstructorConfigLSTMLSTM)\n{\n  cudaStream_t stream;\n  HANDLE_ERROR(cudaStreamCreate(&stream));\n  LSTMLSTMConfig config;\n  config.init_config.input_dim = init_input_dim;\n  config.init_config.hidden_dim = init_hidden_dim;\n  config.init_config.output_layers = init_output_layers;\n  config.pred_config.input_dim = input_dim;\n  config.pred_config.hidden_dim = hidden_dim;\n  config.pred_config.output_layers = output_layers;\n  config.init_len = init_len;\n\n  LSTMLSTMHelper<> helper(config, stream);\n\n  EXPECT_EQ(helper.getLSTMModel()->stream_, stream);\n  EXPECT_NE(helper.getLSTMModel(), nullptr);\n\n  EXPECT_EQ(helper.getLSTMModel()->getInputDim(), input_dim);\n  EXPECT_EQ(helper.getLSTMModel()->getOutputDim(), output_dim);\n  EXPECT_EQ(helper.getLSTMModel()->getHiddenDim(), hidden_dim);\n\n  EXPECT_EQ(helper.getInitModel()->getInputDim(), init_input_dim);\n  EXPECT_EQ(helper.getInitModel()->getOutputDim(), 2 * hidden_dim);\n  EXPECT_EQ(helper.getInitModel()->getHiddenDim(), init_hidden_dim);\n\n  auto init_lstm = helper.getInitModel();\n  EXPECT_NE(init_lstm, nullptr);\n\n  auto hidden = init_lstm->getHiddenState();\n  auto cell = init_lstm->getCellState();\n  for (int i = 0; i < init_lstm->getHiddenDim(); i++)\n\n  {\n    EXPECT_FLOAT_EQ(hidden(i), 0.0f);\n    EXPECT_FLOAT_EQ(cell(i), 0.0f);\n  }\n}\n\nTEST_F(LSTMLSTMHelperTest, initializeLSTMLSTMTest)\n{\n  LSTMLSTMHelper<> helper(init_input_dim, init_hidden_dim, init_output_layers, input_dim, hidden_dim, output_layers,\n                          init_len);\n  Eigen::Matrix<float, 8, 10> buffer;\n  buffer.setOnes();\n\n  helper.getInitModel()->setAllValues(1.0);\n\n  helper.initializeLSTM(buffer);\n\n  auto lstm = helper.getLSTMModel();\n  auto hidden = lstm->getHiddenState();\n  auto cell = lstm->getHiddenState();\n  for (int i = 0; i < hidden_dim; i++)\n  {\n    EXPECT_FLOAT_EQ(hidden(i), 101.0f);\n    EXPECT_FLOAT_EQ(cell(i), 101.0f);\n  
}\n}\n\nTEST_F(LSTMLSTMHelperTest, GPUSetupAndParamsCheck)\n{\n  LSTMLSTMHelper<> model(init_input_dim, init_hidden_dim, init_output_layers, input_dim, hidden_dim, output_layers,\n                         init_len);\n\n  std::vector<float> theta_vec(model.getLSTMModel()->getOutputModel()->getNumParams());\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = distribution(generator);\n  }\n\n  std::vector<float> lstm_vec(model.getLSTMModel()->getNumParams());\n  for (int i = 0; i < lstm_vec.size(); i++)\n  {\n    lstm_vec[i] = distribution(generator);\n  }\n  model.getLSTMModel()->setAllValues(lstm_vec, theta_vec);\n\n  int grid_dim = 5;\n\n  std::vector<float> lstm_params(grid_dim);\n  std::vector<float> shared_lstm_params(grid_dim);\n  std::vector<float> fnn_params(grid_dim);\n  std::vector<float> shared_fnn_params(grid_dim);\n\n  EXPECT_EQ(model.getLSTMModel()->GPUMemStatus_, false);\n  EXPECT_EQ(model.getLSTMModel()->network_d_, nullptr);\n  EXPECT_NE(model.getLSTMModel()->getOutputModel(), nullptr);\n  EXPECT_EQ(model.getInitModel()->GPUMemStatus_, false);\n  EXPECT_EQ(model.getInitModel()->network_d_, nullptr);\n  EXPECT_NE(model.getInitModel()->getOutputModel(), nullptr);\n\n  model.GPUSetup();\n\n  EXPECT_EQ(model.getLSTMModel()->GPUMemStatus_, true);\n  EXPECT_NE(model.getLSTMModel()->network_d_, nullptr);\n  EXPECT_NE(model.getLSTMModel()->getOutputModel(), nullptr);\n  EXPECT_EQ(model.getInitModel()->GPUMemStatus_, false);\n  EXPECT_EQ(model.getInitModel()->network_d_, nullptr);\n  EXPECT_NE(model.getInitModel()->getOutputModel(), nullptr);\n\n  // launch kernel\n  launchParameterCheckTestKernel<LSTMHelper<>>(*model.getLSTMModel(), lstm_params, shared_lstm_params, fnn_params,\n                                               shared_fnn_params, grid_dim);\n\n  EXPECT_EQ(model.getLSTMModel()->getOutputModel()->getGrdSharedSizeBytes() / sizeof(float), 44);\n  EXPECT_EQ(model.getLSTMModel()->getOutputModel()->getBlkSharedSizeBytes() / 
sizeof(float), 36);\n  EXPECT_EQ(model.getLSTMModel()->getOutputModel()->getNumParams(), 38);\n\n  int grd_size = model.getLSTMModel()->getOutputModel()->getGrdSharedSizeBytes() / sizeof(float);\n\n  for (int grid = 0; grid < grid_dim; grid++)\n  {\n    // ensure that the output nn matches\n    for (int i = 0; i < theta_vec.size(); i++)\n    {\n      EXPECT_FLOAT_EQ(fnn_params[grid * grd_size + i], theta_vec[i]) << \"at grid \" << grid << \" at index \" << i;\n    }\n\n    const int h = model.getLSTMModel()->getHiddenDim();\n    const int hh = h * h;\n    const int ih = model.getLSTMModel()->getInputDim() * model.getLSTMModel()->getHiddenDim();\n    int shift = (h * 6 + hh * 4 + ih * 4) * grid;\n\n    for (int i = 0; i < hh; i++)\n    {\n      EXPECT_FLOAT_EQ(lstm_params[shift + i], lstm_vec[i]) << \"at grid \" << grid << \" at index \" << i;\n      EXPECT_FLOAT_EQ(lstm_params[shift + hh + i], lstm_vec[hh + i]) << \"at grid \" << grid << \" at index \" << i;\n      EXPECT_FLOAT_EQ(lstm_params[shift + 2 * hh + i], lstm_vec[2 * hh + i]) << \"at grid \" << grid << \" at index \" << i;\n      EXPECT_FLOAT_EQ(lstm_params[shift + 3 * hh + i], lstm_vec[3 * hh + i]) << \"at grid \" << grid << \" at index \" << i;\n    }\n\n    for (int i = 0; i < ih; i++)\n    {\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + i], lstm_vec[4 * hh + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + ih + i], lstm_vec[4 * hh + ih + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 2 * ih + i], lstm_vec[4 * hh + 2 * ih + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 3 * ih + i], lstm_vec[4 * hh + 3 * ih + i]);\n    }\n\n    for (int i = 0; i < h; i++)\n    {\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + i], lstm_vec[4 * hh + 4 * ih + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + h + i], lstm_vec[4 * hh + 4 * ih + h + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 2 * h + i], lstm_vec[4 * hh + 4 * ih + 2 * h + 
i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 3 * h + i], lstm_vec[4 * hh + 4 * ih + 3 * h + i]);\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 4 * h + i], lstm_vec[4 * hh + 4 * ih + 4 * h + i])\n          << \"at index \" << i;\n      EXPECT_FLOAT_EQ(lstm_params[shift + 4 * hh + 4 * ih + 5 * h + i], lstm_vec[4 * hh + 4 * ih + 5 * h + i])\n          << \"at index \" << i;\n    }\n  }\n\n  for (int grid = 0; grid < grid_dim; grid++)\n  {\n    // ensure that the output nn matches\n    for (int i = 0; i < theta_vec.size(); i++)\n    {\n      EXPECT_FLOAT_EQ(shared_fnn_params[grid * grd_size + i], theta_vec[i]) << \"at grid \" << grid << \" at index \" << i;\n    }\n\n    const int h = model.getLSTMModel()->getHiddenDim();\n    const int hh = h * h;\n    const int ih = model.getLSTMModel()->getInputDim() * model.getLSTMModel()->getHiddenDim();\n    int shift = (h * 6 + hh * 4 + ih * 4) * grid;\n\n    for (int i = 0; i < hh; i++)\n    {\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + i], lstm_vec[i]) << \"at grid \" << grid << \" at index \" << i;\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + hh + i], lstm_vec[hh + i]) << \"at grid \" << grid << \" at index \" << i;\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 2 * hh + i], lstm_vec[2 * hh + i])\n          << \"at grid \" << grid << \" at index \" << i;\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 3 * hh + i], lstm_vec[3 * hh + i])\n          << \"at grid \" << grid << \" at index \" << i;\n    }\n\n    for (int i = 0; i < ih; i++)\n    {\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + i], lstm_vec[4 * hh + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + ih + i], lstm_vec[4 * hh + ih + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 2 * ih + i], lstm_vec[4 * hh + 2 * ih + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 3 * ih + i], lstm_vec[4 * hh + 3 * ih + i]);\n    }\n\n    for (int i = 0; i < h; i++)\n    {\n 
     EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + i], lstm_vec[4 * hh + 4 * ih + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + h + i], lstm_vec[4 * hh + 4 * ih + h + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 2 * h + i], lstm_vec[4 * hh + 4 * ih + 2 * h + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 3 * h + i], lstm_vec[4 * hh + 4 * ih + 3 * h + i]);\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 4 * h + i], lstm_vec[4 * hh + 4 * ih + 4 * h + i])\n          << \"at index \" << i;\n      EXPECT_FLOAT_EQ(shared_lstm_params[shift + 4 * hh + 4 * ih + 5 * h + i], lstm_vec[4 * hh + 4 * ih + 5 * h + i])\n          << \"at index \" << i;\n    }\n  }\n}\n\nTEST_F(LSTMLSTMHelperTest, LoadModelPathTest)\n{\n  std::string path = mppi::tests::test_lstm_lstm;\n  if (!fileExists(path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << path.c_str();\n    exit(-1);\n  }\n  LSTMLSTMHelper<> model(path);\n\n  EXPECT_EQ(model.getLSTMModel()->getInputDim(), 3);\n  EXPECT_EQ(model.getLSTMModel()->getHiddenDim(), 25);\n  EXPECT_EQ(model.getLSTMModel()->getOutputDim(), 2);\n  EXPECT_EQ(model.getLSTMModel()->getOutputModel()->getNetStructurePtr()[0], 28);\n  EXPECT_EQ(model.getLSTMModel()->getOutputModel()->getNetStructurePtr()[1], 30);\n  EXPECT_EQ(model.getLSTMModel()->getOutputModel()->getNetStructurePtr()[2], 30);\n  EXPECT_EQ(model.getLSTMModel()->getOutputModel()->getNetStructurePtr()[3], 2);\n\n  EXPECT_EQ(model.getInitModel()->getInputDim(), 3);\n  EXPECT_EQ(model.getInitModel()->getHiddenDim(), 60);\n  EXPECT_EQ(model.getInitModel()->getOutputDim(), 50);\n  EXPECT_EQ(model.getInitModel()->getOutputModel()->getNetStructurePtr()[0], 63);\n  EXPECT_EQ(model.getInitModel()->getOutputModel()->getNetStructurePtr()[1], 15);\n  EXPECT_EQ(model.getInitModel()->getOutputModel()->getNetStructurePtr()[2], 15);\n  
EXPECT_EQ(model.getInitModel()->getOutputModel()->getNetStructurePtr()[3], 50);\n\n  EXPECT_EQ(model.getInitLen(), 6);\n  assert(model.getInitLen() == 6);\n\n  cnpy::npz_t input_outputs = cnpy::npz_load(path);\n  double* inputs = input_outputs.at(\"input\").data<double>();\n  double* outputs = input_outputs.at(\"output\").data<double>();\n  double* init_inputs = input_outputs.at(\"init_input\").data<double>();\n  double* init_hidden = input_outputs.at(\"init/hidden\").data<double>();\n  double* init_cell = input_outputs.at(\"init/cell\").data<double>();\n  double* hidden = input_outputs.at(\"hidden\").data<double>();\n  double* cell = input_outputs.at(\"cell\").data<double>();\n\n  const int hidden_dim = model.getLSTMModel()->getHiddenDim();\n  const int input_dim = model.getLSTMModel()->getInputDim();\n  const int output_dim = model.getLSTMModel()->getOutputDim();\n  const int init_hidden_dim = model.getInitModel()->getHiddenDim();\n  const int init_input_dim = model.getInitModel()->getInputDim();\n\n  // TOOD figure out the number of points from the input\n  int init_len = model.getInitLen();\n  int num_points = input_outputs.at(\"num_points\").data<int>()[0];\n  int T = input_outputs.at(\"output\").shape[0] / (num_points * output_dim);\n\n  EXPECT_EQ(num_points, 3);\n  EXPECT_EQ(input_dim, init_input_dim);\n\n  double tol = 1e-5;\n\n  auto input = model.getLSTMModel()->getZeroInputVector();\n  auto output = model.getLSTMModel()->getZeroOutputVector();\n\n  for (int point = 0; point < num_points; point++)\n  {\n    // run the init network and ensure initial hidden/cell match\n    Eigen::MatrixXf buffer(init_input_dim, init_len);\n    model.resetInitHiddenCPU();\n    for (int t = 0; t < init_len; t++)\n    {\n      for (int i = 0; i < init_input_dim; i++)\n      {\n        buffer(i, t) = init_inputs[point * init_len * init_input_dim + t * init_input_dim + i];\n      }\n    }\n    model.initializeLSTM(buffer);\n\n    for (int i = 0; i < hidden_dim; i++)\n    {\n    
  EXPECT_NEAR(model.getLSTMModel()->getHiddenState()(i), init_hidden[hidden_dim * point + i], tol)\n          << \"at point \" << point << \" index \" << i;\n      EXPECT_NEAR(model.getLSTMModel()->getCellState()(i), init_cell[hidden_dim * point + i], tol);\n    }\n    for (int t = 0; t < T; t++)\n    {\n      for (int i = 0; i < input_dim; i++)\n      {\n        input(i) = inputs[point * T * input_dim + t * input_dim + i];\n      }\n      model.forward(input, output);\n\n      for (int i = 0; i < output_dim; i++)\n      {\n        EXPECT_NEAR(output[i], outputs[point * T * output_dim + t * output_dim + i], tol)\n            << \"point \" << point << \" T \" << t << \" at dim \" << i;\n      }\n      for (int i = 0; i < hidden_dim; i++)\n      {\n        EXPECT_NEAR(model.getLSTMModel()->getHiddenState()(i), hidden[point * T * hidden_dim + hidden_dim * t + i], tol)\n            << \"point \" << point << \" at dim \" << i;\n        EXPECT_NEAR(model.getLSTMModel()->getCellState()(i), cell[point * T * hidden_dim + hidden_dim * t + i], tol)\n            << \"point \" << point << \" at dim \" << i;\n      }\n    }\n  }\n}\n\nTEST_F(LSTMLSTMHelperTest, LoadModelPathTestLongBuffer)\n{\n  std::string path = mppi::tests::test_lstm_lstm;\n  if (!fileExists(path))\n  {\n    std::cerr << \"Could not load neural net model at path: \" << path.c_str();\n    exit(-1);\n  }\n  LSTMLSTMHelper<> model(path);\n\n  EXPECT_EQ(model.getLSTMModel()->getInputDim(), 3);\n  EXPECT_EQ(model.getLSTMModel()->getHiddenDim(), 25);\n  EXPECT_EQ(model.getLSTMModel()->getOutputDim(), 2);\n  EXPECT_EQ(model.getLSTMModel()->getOutputModel()->getNetStructurePtr()[0], 28);\n  EXPECT_EQ(model.getLSTMModel()->getOutputModel()->getNetStructurePtr()[1], 30);\n  EXPECT_EQ(model.getLSTMModel()->getOutputModel()->getNetStructurePtr()[2], 30);\n  EXPECT_EQ(model.getLSTMModel()->getOutputModel()->getNetStructurePtr()[3], 2);\n\n  EXPECT_EQ(model.getInitModel()->getInputDim(), 3);\n  
EXPECT_EQ(model.getInitModel()->getHiddenDim(), 60);\n  EXPECT_EQ(model.getInitModel()->getOutputDim(), 50);\n  EXPECT_EQ(model.getInitModel()->getOutputModel()->getNetStructurePtr()[0], 63);\n  EXPECT_EQ(model.getInitModel()->getOutputModel()->getNetStructurePtr()[1], 15);\n  EXPECT_EQ(model.getInitModel()->getOutputModel()->getNetStructurePtr()[2], 15);\n  EXPECT_EQ(model.getInitModel()->getOutputModel()->getNetStructurePtr()[3], 50);\n\n  cnpy::npz_t input_outputs = cnpy::npz_load(path);\n  double* inputs = input_outputs.at(\"input\").data<double>();\n  double* outputs = input_outputs.at(\"output\").data<double>();\n  double* init_inputs = input_outputs.at(\"init_input\").data<double>();\n  double* init_hidden = input_outputs.at(\"init/hidden\").data<double>();\n  double* init_cell = input_outputs.at(\"init/cell\").data<double>();\n  double* hidden = input_outputs.at(\"hidden\").data<double>();\n  double* cell = input_outputs.at(\"cell\").data<double>();\n\n  const int hidden_dim = model.getLSTMModel()->getHiddenDim();\n  const int input_dim = model.getLSTMModel()->getInputDim();\n  const int output_dim = model.getLSTMModel()->getOutputDim();\n  const int init_hidden_dim = model.getInitModel()->getHiddenDim();\n  const int init_input_dim = model.getInitModel()->getInputDim();\n\n  // TOOD figure out the number of points from the input\n  const int init_len = model.getInitLen();\n  EXPECT_EQ(model.getInitLen(), 6);\n  int num_points = input_outputs.at(\"num_points\").data<int>()[0];\n  int T = input_outputs.at(\"output\").shape[0] / (num_points * output_dim);\n\n  double tol = 1e-5;\n\n  auto input = model.getLSTMModel()->getZeroInputVector();\n  auto output = model.getLSTMModel()->getZeroOutputVector();\n\n  for (int point = 0; point < num_points; point++)\n  {\n    // run the init network and ensure initial hidden/cell match\n    Eigen::MatrixXf buffer(init_input_dim, init_len + 30);\n    model.resetInitHiddenCPU();\n    for (int t = 0; t < init_len; t++)\n    
{\n      for (int i = 0; i < init_input_dim; i++)\n      {\n        buffer(i, t + 30) = init_inputs[point * init_len * init_input_dim + t * init_input_dim + i];\n      }\n    }\n    model.initializeLSTM(buffer);\n\n    for (int i = 0; i < hidden_dim; i++)\n    {\n      EXPECT_NEAR(model.getLSTMModel()->getHiddenState()(i), init_hidden[hidden_dim * point + i], tol)\n          << \"at point \" << point << \" index \" << i;\n      EXPECT_NEAR(model.getLSTMModel()->getCellState()(i), init_cell[hidden_dim * point + i], tol);\n    }\n    for (int t = 0; t < T; t++)\n    {\n      for (int i = 0; i < input_dim; i++)\n      {\n        input(i) = inputs[point * T * input_dim + t * input_dim + i];\n      }\n      model.forward(input, output);\n\n      for (int i = 0; i < output_dim; i++)\n      {\n        EXPECT_NEAR(output[i], outputs[point * T * output_dim + t * output_dim + i], tol)\n            << \"point \" << point << \" at dim \" << i;\n      }\n      for (int i = 0; i < hidden_dim; i++)\n      {\n        EXPECT_NEAR(model.getLSTMModel()->getHiddenState()(i), hidden[point * T * hidden_dim + hidden_dim * t + i], tol)\n            << \"point \" << point << \" at dim \" << i;\n        EXPECT_NEAR(model.getLSTMModel()->getCellState()(i), cell[point * T * hidden_dim + hidden_dim * t + i], tol)\n            << \"point \" << point << \" at dim \" << i;\n      }\n    }\n  }\n}\n\nTEST_F(LSTMLSTMHelperTest, forwardCPU)\n{\n  LSTMLSTMHelper<> model(init_input_dim, init_hidden_dim, init_output_layers, input_dim, hidden_dim, output_layers, 10);\n\n  model.getLSTMModel()->setAllValues(1.0f);\n  model.getInitModel()->setAllValues(0.1f);\n  model.getInitModel()->getOutputModel()->setAllWeights(0.01f);\n\n  for (int i = 0; i < init_hidden_dim; i++)\n  {\n    EXPECT_FLOAT_EQ(model.getInitModel()->getHiddenState()(i), 0.1);\n    EXPECT_FLOAT_EQ(model.getInitModel()->getCellState()(i), 0.1);\n  }\n\n  for (int i = 0; i < hidden_dim; i++)\n  {\n    
EXPECT_FLOAT_EQ(model.getLSTMModel()->getHiddenState()(i), 1.0);\n    EXPECT_FLOAT_EQ(model.getLSTMModel()->getCellState()(i), 1.0);\n  }\n\n  auto input = model.getLSTMModel()->getZeroInputVector();\n  auto output = model.getLSTMModel()->getZeroOutputVector();\n  input.setOnes();\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 18.640274);\n  EXPECT_FLOAT_EQ(output[1], 18.640274);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 18.950548);\n  EXPECT_FLOAT_EQ(output[1], 18.950548);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 18.993294);\n  EXPECT_FLOAT_EQ(output[1], 18.993294);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 18.999092);\n  EXPECT_FLOAT_EQ(output[1], 18.999092);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 18.999878);\n  EXPECT_FLOAT_EQ(output[1], 18.999878);\n\n  auto buffer = model.getEmptyBufferMatrix(10);\n  buffer.setOnes();\n  buffer = buffer * 0.1;\n\n  model.initializeLSTM(buffer);\n\n  for (int i = 0; i < init_hidden_dim; i++)\n  {\n    EXPECT_FLOAT_EQ(model.getInitModel()->getHiddenState()(i), 0.99790788);\n    EXPECT_FLOAT_EQ(model.getInitModel()->getCellState()(i), 9.2190571);\n  }\n  for (int i = 0; i < hidden_dim; i++)\n  {\n    EXPECT_FLOAT_EQ(model.getLSTMModel()->getHiddenState()(i), 0.558857381);\n    EXPECT_FLOAT_EQ(model.getLSTMModel()->getCellState()(i), 0.558857381);\n  }\n\n  input *= 0.1;\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 10.945133);\n  EXPECT_FLOAT_EQ(output[1], 10.945133);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 11.680509);\n  EXPECT_FLOAT_EQ(output[1], 11.680509);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 11.783686);\n  EXPECT_FLOAT_EQ(output[1], 11.783686);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 11.797728);\n  EXPECT_FLOAT_EQ(output[1], 11.797728);\n\n  model.forward(input, output);\n  EXPECT_FLOAT_EQ(output[0], 11.79963);\n  
EXPECT_FLOAT_EQ(output[1], 11.79963);\n}\n\nTEST_F(LSTMLSTMHelperTest, forwardGPU)\n{\n  const int num_rollouts = 1;\n\n  LSTMLSTMHelper<> model(init_input_dim, init_hidden_dim, init_output_layers, input_dim, hidden_dim, output_layers, 10);\n\n  model.getLSTMModel()->setAllValues(1.0f);\n  model.getInitModel()->setAllValues(0.1f);\n  model.getInitModel()->getOutputModel()->setAllWeights(0.01f);\n\n  model.GPUSetup();\n\n  std::array<float, 5> true_vals = { 10.945133, 11.680509, 11.783686, 11.797728, 11.79963 };\n\n  std::vector<std::array<float, 8>> input_arr(num_rollouts);\n  std::vector<std::array<float, 2>> output_arr(num_rollouts);\n\n  auto input = model.getLSTMModel()->getZeroInputVector();\n  auto output = model.getLSTMModel()->getZeroOutputVector();\n  input.setOnes();\n  input = input * 0.1;\n\n  for (int i = 0; i < init_hidden_dim; i++)\n  {\n    EXPECT_FLOAT_EQ(model.getInitModel()->getHiddenState()(i), 0.1);\n    EXPECT_FLOAT_EQ(model.getInitModel()->getCellState()(i), 0.1);\n  }\n\n  for (int i = 0; i < hidden_dim; i++)\n  {\n    EXPECT_FLOAT_EQ(model.getLSTMModel()->getHiddenState()(i), 1.0);\n    EXPECT_FLOAT_EQ(model.getLSTMModel()->getCellState()(i), 1.0);\n  }\n\n  auto buffer = model.getEmptyBufferMatrix(10);\n  buffer.setOnes();\n  buffer = buffer * 0.1;\n\n  for (int state_index = 0; state_index < num_rollouts; state_index++)\n  {\n    for (int dim = 0; dim < input_arr[0].size(); dim++)\n    {\n      input_arr[state_index][dim] = input(dim);\n    }\n  }\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int step = 1; step < 6; step++)\n    {\n      model.initializeLSTM(buffer);\n\n      for (int i = 0; i < init_hidden_dim; i++)\n      {\n        EXPECT_FLOAT_EQ(model.getInitModel()->getHiddenState()(i), 0.99790788);\n        EXPECT_FLOAT_EQ(model.getInitModel()->getCellState()(i), 9.2190571);\n      }\n      for (int i = 0; i < hidden_dim; i++)\n      {\n        EXPECT_FLOAT_EQ(model.getLSTMModel()->getHiddenState()(i), 
0.558857381);\n        EXPECT_FLOAT_EQ(model.getLSTMModel()->getCellState()(i), 0.558857381);\n      }\n      launchForwardTestKernel<LSTMHelper<>, 8, 2, 32>(*model.getLSTMModel(), input_arr, output_arr, y_dim, step);\n      for (int point = 0; point < num_rollouts; point++)\n      {\n        // model.resetInitHiddenCPU();\n        model.resetLSTMHiddenCellCPU();\n        input.setOnes();\n        input = input * 0.1;\n\n        for (int cpu_step = 0; cpu_step < step; cpu_step++)\n        {\n          model.forward(input, output);\n        }\n        for (int dim = 0; dim < input_dim; dim++)\n        {\n          EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        }\n        for (int dim = 0; dim < output_dim; dim++)\n        {\n          EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4)\n              << \"at index \" << point << \" with y_dim \" << y_dim << \" dim \" << dim;\n          EXPECT_TRUE(isfinite(output_arr[point][dim]));\n          EXPECT_FLOAT_EQ(output(dim), true_vals[step - 1]) << \"at dim \" << dim << \" step \" << step;\n        }\n      }\n    }\n  }\n}\n\nTEST_F(LSTMLSTMHelperTest, forwardGPUCompareShared)\n{\n  const int num_rollouts = 3000;\n\n  LSTMLSTMHelper<> model(init_input_dim, init_hidden_dim, init_output_layers, input_dim, hidden_dim, output_layers,\n                         init_len);\n\n  std::vector<float> theta_vec(model.getLSTMModel()->getOutputModel()->getNumParams());\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = distribution(generator);\n  }\n  std::vector<float> lstm_vec(model.getLSTMModel()->getNumParams());\n  for (int i = 0; i < lstm_vec.size(); i++)\n  {\n    lstm_vec[i] = distribution(generator);\n  }\n  model.getLSTMModel()->setAllValues(lstm_vec, theta_vec);\n\n  theta_vec.resize(model.getInitModel()->getOutputModel()->getNumParams());\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = 
distribution(generator);\n  }\n  lstm_vec.resize(model.getInitModel()->getNumParams());\n  for (int i = 0; i < lstm_vec.size(); i++)\n  {\n    lstm_vec[i] = distribution(generator);\n  }\n  model.getInitModel()->setAllValues(lstm_vec, theta_vec);\n\n  model.GPUSetup();\n\n  auto buffer = model.getEmptyBufferMatrix(10);\n\n  std::vector<std::array<float, 8>> input_arr(num_rollouts);\n  std::vector<std::array<float, 2>> output_arr(num_rollouts);\n\n  Eigen::Matrix<float, 8, num_rollouts> inputs = Eigen::Matrix<float, 8, num_rollouts>::Random();\n  auto input = model.getLSTMModel()->getZeroInputVector();\n  auto output = model.getLSTMModel()->getZeroOutputVector();\n\n  for (int state_index = 0; state_index < num_rollouts; state_index++)\n  {\n    for (int dim = 0; dim < input_arr[0].size(); dim++)\n    {\n      input_arr[state_index][dim] = inputs.col(state_index)(dim);\n    }\n  }\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int step = 1; step < 6; step++)\n    {\n      buffer.setRandom();\n      model.initializeLSTM(buffer);\n\n      launchForwardTestKernel<LSTMHelper<>, 8, 2, 32>(*model.getLSTMModel(), input_arr, output_arr, y_dim, step);\n      for (int point = 0; point < num_rollouts; point++)\n      {\n        // model.resetInitHiddenCPU();\n        model.resetLSTMHiddenCellCPU();\n        input = inputs.col(point);\n\n        for (int cpu_step = 0; cpu_step < step; cpu_step++)\n        {\n          model.forward(input, output);\n        }\n        for (int dim = 0; dim < input_dim; dim++)\n        {\n          EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        }\n        for (int dim = 0; dim < output_dim; dim++)\n        {\n          EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4)\n              << \"at index \" << point << \" with y_dim \" << y_dim << \" dim \" << dim;\n          EXPECT_TRUE(isfinite(output_arr[point][dim]));\n        }\n      }\n    }\n  
}\n}\n\nTEST_F(LSTMLSTMHelperTest, forwardGPUCompareNoShared)\n{\n  const int num_rollouts = 3000;\n\n  LSTMLSTMHelper<false> model(init_input_dim, init_hidden_dim, init_output_layers, input_dim, hidden_dim, output_layers,\n                              init_len);\n\n  std::vector<float> theta_vec(model.getLSTMModel()->getOutputModel()->getNumParams());\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = distribution(generator);\n  }\n  std::vector<float> lstm_vec(model.getLSTMModel()->getNumParams());\n  for (int i = 0; i < lstm_vec.size(); i++)\n  {\n    lstm_vec[i] = distribution(generator);\n  }\n  model.getLSTMModel()->setAllValues(lstm_vec, theta_vec);\n\n  theta_vec.resize(model.getInitModel()->getOutputModel()->getNumParams());\n  for (int i = 0; i < theta_vec.size(); i++)\n  {\n    theta_vec[i] = distribution(generator);\n  }\n  lstm_vec.resize(model.getInitModel()->getNumParams());\n  for (int i = 0; i < lstm_vec.size(); i++)\n  {\n    lstm_vec[i] = distribution(generator);\n  }\n  model.getInitModel()->setAllValues(lstm_vec, theta_vec);\n\n  model.GPUSetup();\n\n  auto buffer = model.getEmptyBufferMatrix(10);\n\n  std::vector<std::array<float, 8>> input_arr(num_rollouts);\n  std::vector<std::array<float, 2>> output_arr(num_rollouts);\n\n  Eigen::Matrix<float, 8, num_rollouts> inputs = Eigen::Matrix<float, 8, num_rollouts>::Random();\n  auto input = model.getLSTMModel()->getZeroInputVector();\n  auto output = model.getLSTMModel()->getZeroOutputVector();\n\n  for (int state_index = 0; state_index < num_rollouts; state_index++)\n  {\n    for (int dim = 0; dim < input_arr[0].size(); dim++)\n    {\n      input_arr[state_index][dim] = inputs.col(state_index)(dim);\n    }\n  }\n\n  for (int y_dim = 1; y_dim < 16; y_dim++)\n  {\n    for (int step = 1; step < 6; step++)\n    {\n      buffer.setRandom();\n      model.initializeLSTM(buffer);\n\n      launchForwardTestKernel<LSTMHelper<false>, 8, 2, 32>(*model.getLSTMModel(), input_arr, 
output_arr, y_dim, step);\n      for (int point = 0; point < num_rollouts; point++)\n      {\n        // model.resetInitHiddenCPU();\n        model.resetLSTMHiddenCellCPU();\n        input = inputs.col(point);\n\n        for (int cpu_step = 0; cpu_step < step; cpu_step++)\n        {\n          model.forward(input, output);\n        }\n        for (int dim = 0; dim < input_dim; dim++)\n        {\n          EXPECT_NEAR(input(dim), input_arr[point][dim], 1e-4) << \"at index \" << point << \" with y_dim \" << y_dim;\n        }\n        for (int dim = 0; dim < output_dim; dim++)\n        {\n          EXPECT_NEAR(output(dim), output_arr[point][dim], 1e-4)\n              << \"at index \" << point << \" with y_dim \" << y_dim << \" dim \" << dim;\n          EXPECT_TRUE(isfinite(output_arr[point][dim]));\n        }\n      }\n    }\n  }\n}\n//\n// // TEST_F(LSTMLSTMHelperTest, TestComputeGradComputationFinite)\n// // {\n// //   LSTMLSTMHelper<LSTMParams<6, 32, 32, 4>> model;\n// //   std::vector<float> theta(1412);\n// //   for (int i = 0; i < 1412; i++)\n// //   {\n// //     theta[i] = distribution(generator);\n// //   }\n// //   model.updateModel({ 6, 32, 32, 4 }, theta);\n// //\n// //   LSTMLSTMHelper<LSTMParams<6, 32, 32, 4>>::dfdx numeric_jac;\n// //   LSTMLSTMHelper<LSTMParams<6, 32, 32, 4>>::dfdx analytic_jac;\n// //\n// //   for (int i = 0; i < 1000; i++)\n// //   {\n// //     LSTMLSTMHelper<LSTMParams<6, 32, 32, 4>>::input_array input;\n// //     input = LSTMLSTMHelper<LSTMParams<6, 32, 32, 4>>::input_array::Random();\n// //\n// //     model.computeGrad(input, analytic_jac);\n// //     EXPECT_TRUE(analytic_jac.allFinite());\n// //   }\n// // }\n// //\n// // TEST_F(LSTMLSTMHelperTest, TestComputeGradComputationCompare)\n// // {\n// //   GTEST_SKIP();\n// //   LSTMLSTMHelper<LSTMParams<6, 32, 32, 4>> model;\n// //   std::vector<float> theta(1412);\n// //   for (int i = 0; i < 1412; i++)\n// //   {\n// //     theta[i] = distribution(generator);\n// //   }\n// //   
model.updateModel({ 6, 32, 32, 4 }, theta);\n// //\n// //   LSTMLSTMHelper<LSTMParams<6, 32, 32, 4>>::dfdx numeric_jac;\n// //   LSTMLSTMHelper<LSTMParams<6, 32, 32, 4>>::dfdx analytic_jac;\n// //\n// //   LSTMLSTMHelper<LSTMParams<6, 32, 32, 4>>::input_array input;\n// //   input << 1, 2, 3, 4, 5, 6;\n// //\n// //   model.computeGrad(input, analytic_jac);\n// //\n// //   // numeric_jac = num_diff.df(input, numeric_jac);\n// //\n// //   ASSERT_LT((numeric_jac - analytic_jac).norm(), 1e-3) << \"Numeric Jacobian\\n\"\n// //                                                        << numeric_jac << \"\\nAnalytic Jacobian\\n\"\n// //                                                        << analytic_jac;\n// // }\n"
  },
  {
    "path": "tests/sampling_distributions/CMakeLists.txt",
    "content": "set(GTEST_LIBRARIES gtest gmock gtest_main)\nfile(GLOB TARGET_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cu)\n\n\nforeach(T_FILE IN LISTS TARGET_SRCS)\n  get_filename_component(T_NAME ${T_FILE} NAME_WE)\n  add_executable(${T_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp ${T_FILE})\n  target_link_libraries(${T_NAME}\n                        ${GTEST_LIBRARIES}\n                        ${MPPI_HEADER_LIBRARY_NAME})\n  gtest_discover_tests(${T_NAME})\n  set_target_properties(${T_NAME} PROPERTIES FOLDER test)\nendforeach()\n"
  },
  {
    "path": "tests/sampling_distributions/colored_noise_tests.cu",
    "content": "//\n// Created by Bogdan on 12/26/21\n//\n\n#include <gtest/gtest.h>\n#include <mppi/sampling_distributions/colored_noise/colored_noise.cuh>\n\n#include <numeric>\n#include \"gtest/gtest.h\"\n\nvoid assert_float_rel_near(const float known, const float compute, float rel_err)\n{\n  float err = fabsf(compute - known) / known;\n  ASSERT_NEAR(known, compute, rel_err) << \"Relative error is \" << err;\n}\n\nTEST(cuFFT, checkErrorCode)\n{\n  cufftHandle plan;\n  cuComplex* input_d;\n  float* output_d;\n  // As this call is intended to cause issues, disable compiler warning\n  // src: https://stackoverflow.com/questions/14831051/how-to-disable-a-specific-nvcc-compiler-warnings\n  // https://stackoverflow.com/questions/56193080/how-do-i-apply-a-flag-setting-nvcc-pragma-to-only-a-few-lines-of-code\n  // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#nv-diagnostic-pragmas\n#pragma push\n#pragma diag_suppress = used_before_set\n  auto status = cufftExecC2R(plan, input_d, output_d);\n#pragma pop\n  std::string error_string = cufftGetErrorString(status);\n  // std::cout << error_string << std::endl;\n  EXPECT_TRUE(error_string == \"cuFFT was passed an invalid plan handle\");\n}\n\ntemplate <int C_DIM>\nstruct TestDynamicsParams : public DynamicsParams\n{\n  enum class ControlIndex : int\n  {\n    NUM_CONTROLS = C_DIM,\n  };\n  enum class OutputIndex : int\n  {\n    EMPTY = 0,\n    NUM_OUTPUTS\n  };\n};\n\ntemplate <class DYN_PARAMS_T>\nclass TestNoise : public ::testing::Test\n{\npublic:\n  const int NUM_TIMESTEPS = 250;\n  const int NUM_ROLLOUTS = 5000;\n  const int CONTROL_DIM = C_IND_CLASS(DYN_PARAMS_T, NUM_CONTROLS);\n  using SAMPLER_T = mppi::sampling_distributions::ColoredNoiseDistribution<DYN_PARAMS_T>;\n  using SAMPLER_PARAMS_T = typename SAMPLER_T::SAMPLING_PARAMS_T;\n\n  SAMPLER_T* sampler;\n  cudaStream_t stream;\n  curandGenerator_t* gen;\n\nprotected:\n  void SetUp() override\n  {\n    SAMPLER_PARAMS_T params;\n    for (int i = 0; 
i < CONTROL_DIM; i++)\n    {\n      params.exponents[i] = 0.0;\n    }\n    params.num_timesteps = NUM_TIMESTEPS;\n    params.num_rollouts = NUM_ROLLOUTS;\n    params.pure_noise_trajectories_percentage = 0.0;\n    params.offset_decay_rate = 0.0;\n\n    cudaStreamCreate(&stream);\n    gen = new curandGenerator_t();\n    curandCreateGenerator(gen, CURAND_RNG_PSEUDO_DEFAULT);\n    curandSetPseudoRandomGeneratorSeed(*gen, 42);\n    curandSetGeneratorOffset(*gen, 0);\n    curandSetStream(*gen, stream);\n    sampler = new SAMPLER_T(params, stream);\n    sampler->GPUSetup();\n  }\n\n  void TearDown() override\n  {\n    delete gen;\n    delete sampler;\n  }\n};\n\nusing DIFFERENT_CONTROL_DIMS =\n    ::testing::Types<TestDynamicsParams<1>, TestDynamicsParams<2>, TestDynamicsParams<3>, TestDynamicsParams<4>>;\n\nTYPED_TEST_SUITE(TestNoise, DIFFERENT_CONTROL_DIMS);\n\nTYPED_TEST(TestNoise, WhiteNoise)\n{\n  int full_buffer_size = this->NUM_ROLLOUTS * this->NUM_TIMESTEPS * this->CONTROL_DIM;\n  float* colored_noise_output = new float[full_buffer_size]{ 0 };\n\n  auto sampler_params = this->sampler->getParams();\n  for (int i = 0; i < this->CONTROL_DIM; i++)\n  {\n    sampler_params.exponents[i] = 0.0;\n    sampler_params.std_dev[i] = 1.0;\n  }\n  this->sampler->setParams(sampler_params);\n\n  this->sampler->generateSamples(0, 0, *(this->gen), false);\n  HANDLE_ERROR(cudaMemcpyAsync(colored_noise_output, this->sampler->getControlSample(0, 0, 0),\n                               sizeof(float) * full_buffer_size, cudaMemcpyDeviceToHost, this->stream));\n  HANDLE_ERROR(cudaStreamSynchronize(this->stream));\n\n  std::vector<int> num_within_std_dev(3, 0);\n  // Ignore first rollout as that will be all zeros\n  for (int i = this->NUM_TIMESTEPS * this->CONTROL_DIM; i < full_buffer_size; i++)\n  {\n    for (int j = 0; j < num_within_std_dev.size(); j++)\n    {\n      if (fabsf(colored_noise_output[i]) < j + 1.0)\n      {\n        num_within_std_dev[j]++;\n        break;\n      }\n    }\n 
 }\n\n  float perc_within_n_std_dev[num_within_std_dev.size()];\n  // Percentages from https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule\n  float known_percentages[3] = { 0.6827, 0.9545, 0.9973 };\n  for (int i = 0; i < num_within_std_dev.size(); i++)\n  {\n    perc_within_n_std_dev[i] =\n        std::accumulate(num_within_std_dev.begin(), num_within_std_dev.begin() + i + 1, 0.0) / full_buffer_size;\n    assert_float_rel_near(known_percentages[i], perc_within_n_std_dev[i], 0.001);\n  }\n  delete[] colored_noise_output;\n}\n\nTYPED_TEST(TestNoise, MultiNoise)\n{\n  int full_buffer_size = this->NUM_ROLLOUTS * this->NUM_TIMESTEPS * this->CONTROL_DIM;\n  float* colored_noise_output = new float[full_buffer_size]{ 0 };\n\n  auto sampler_params = this->sampler->getParams();\n  for (int i = 0; i < this->CONTROL_DIM; i++)\n  {\n    sampler_params.exponents[i] = i;\n    sampler_params.std_dev[i] = 1.0;\n  }\n  this->sampler->setParams(sampler_params);\n\n  this->sampler->generateSamples(0, 0, *(this->gen), false);\n  HANDLE_ERROR(cudaMemcpyAsync(colored_noise_output, this->sampler->getControlSample(0, 0, 0),\n                               sizeof(float) * full_buffer_size, cudaMemcpyDeviceToHost, this->stream));\n  HANDLE_ERROR(cudaStreamSynchronize(this->stream));\n\n  const int num_std_devs = 3;\n  // std::vector<int> num_within_std_dev(3, 0);\n  std::vector<std::array<int, num_std_devs>> control_std_dev_count;\n  for (int i = 0; i < this->CONTROL_DIM; i++)\n  {\n    std::array<int, num_std_devs> std_dev_count_i = { 0, 0, 0 };\n    control_std_dev_count.push_back(std_dev_count_i);\n  }\n\n  for (int n = 1; n < this->NUM_ROLLOUTS; n++)\n  {\n    for (int t = 0; t < this->NUM_TIMESTEPS; t++)\n    {\n      for (int i = 0; i < this->CONTROL_DIM; i++)\n      {\n        const int sample_idx = (n * this->NUM_TIMESTEPS + t) * this->CONTROL_DIM + i;\n        for (int j = 0; j < num_std_devs; j++)\n        {\n          if (fabsf(colored_noise_output[sample_idx]) < j + 
1.0)\n          {\n            control_std_dev_count[i][j]++;\n            break;\n          }\n        }\n      }\n    }\n  }\n\n  float perc_within_n_std_dev[num_std_devs];\n  // Percentages from https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule\n  float known_percentages[num_std_devs] = { 0.6827, 0.9545, 0.9973 };\n  float proper_count = (this->NUM_ROLLOUTS - 1) * this->NUM_TIMESTEPS;\n  for (int i = 0; i < num_std_devs; i++)\n  {\n    perc_within_n_std_dev[i] =\n        std::accumulate(control_std_dev_count[0].begin(), control_std_dev_count[0].begin() + i + 1, 0.0) / proper_count;\n    assert_float_rel_near(known_percentages[i], perc_within_n_std_dev[i], 0.001);\n  }\n  if (this->CONTROL_DIM > 1)\n  {\n    for (int i = 0; i < num_std_devs; i++)\n    {\n      perc_within_n_std_dev[i] =\n          std::accumulate(control_std_dev_count[1].begin(), control_std_dev_count[1].begin() + i + 1, 0.0) /\n          proper_count;\n      std::cout << \"Control 1 values within \" << i + 1 << \" std dev: \" << perc_within_n_std_dev[i] << std::endl;\n    }\n  }\n  delete[] colored_noise_output;\n}\n\nTEST(ColoredNoise, DISABLED_checkWhiteNoise)\n{\n  int NUM_TIMESTEPS = 50000;\n  int NUM_ROLLOUTS = 1;\n  int CONTROL_DIM = 1;\n  std::vector<float> exponents(CONTROL_DIM, 0.0);\n  int full_buffer_size = NUM_ROLLOUTS * NUM_TIMESTEPS * CONTROL_DIM;\n  float* colored_noise_d;\n  float colored_noise_output[full_buffer_size] = { 0 };\n  HANDLE_ERROR(cudaMalloc((void**)&colored_noise_d, sizeof(float) * full_buffer_size));\n  cudaStream_t stream;\n  curandGenerator_t gen;\n  cudaStreamCreate(&stream);\n  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);\n  curandSetPseudoRandomGeneratorSeed(gen, 42);\n  curandSetStream(gen, stream);\n\n  powerlaw_psd_gaussian(exponents, NUM_TIMESTEPS, NUM_ROLLOUTS, colored_noise_d, 0, gen, 0.0f, stream);\n  HANDLE_ERROR(cudaMemcpyAsync(colored_noise_output, colored_noise_d, sizeof(float) * full_buffer_size,\n                             
  cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  // Check percentages for 3 standard deviations\n  std::vector<int> num_within_std_dev(3, 0);\n  for (int i = 0; i < full_buffer_size; i++)\n  {\n    for (int j = 0; j < num_within_std_dev.size(); j++)\n    {\n      if (fabsf(colored_noise_output[i]) < j + 1.0)\n      {\n        num_within_std_dev[j]++;\n        break;\n      }\n    }\n  }\n\n  float perc_within_n_std_dev[num_within_std_dev.size()];\n  // Percentages from https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule\n  float known_percentages[3] = { 0.6827, 0.9545, 0.9973 };\n  for (int i = 0; i < num_within_std_dev.size(); i++)\n  {\n    perc_within_n_std_dev[i] =\n        std::accumulate(num_within_std_dev.begin(), num_within_std_dev.begin() + i + 1, 0.0) / full_buffer_size;\n    assert_float_rel_near(known_percentages[i], perc_within_n_std_dev[i], 0.001);\n  }\n}\n\nTEST(ColoredNoise, DISABLED_checkPinkNoise)\n{\n  int NUM_TIMESTEPS = 50000;\n  int NUM_ROLLOUTS = 1;\n  int CONTROL_DIM = 1;\n  std::vector<float> exponents(CONTROL_DIM, 1.0);\n  int full_buffer_size = NUM_ROLLOUTS * NUM_TIMESTEPS * CONTROL_DIM;\n  float* colored_noise_d;\n  float colored_noise_output[full_buffer_size] = { 0 };\n  HANDLE_ERROR(cudaMalloc((void**)&colored_noise_d, sizeof(float) * full_buffer_size));\n  cudaStream_t stream;\n  curandGenerator_t gen;\n  cudaStreamCreate(&stream);\n  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);\n  curandSetPseudoRandomGeneratorSeed(gen, 42);\n  curandSetStream(gen, stream);\n\n  powerlaw_psd_gaussian(exponents, NUM_TIMESTEPS, NUM_ROLLOUTS, colored_noise_d, 0, gen, 0.0f, stream);\n  HANDLE_ERROR(cudaMemcpyAsync(colored_noise_output, colored_noise_d, sizeof(float) * full_buffer_size,\n                               cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n  int within_std_dev = 0;\n  int within_2_std_dev = 0;\n  for (int i = 0; i < full_buffer_size; 
i++)\n  {\n    if (fabsf(colored_noise_output[i]) < 1.0)\n    {\n      within_std_dev++;\n    }\n    else if (fabsf(colored_noise_output[i]) < 2.0)\n    {\n      within_2_std_dev++;\n    }\n  }\n  float perc_one_std_dev = (float)within_std_dev / full_buffer_size;\n  float perc_two_std_dev = (float)(within_std_dev + within_2_std_dev) / full_buffer_size;\n  std::cout << \"Percentage within 1 std dev: \" << 100 * perc_one_std_dev << std::endl;\n  std::cout << \"Percentage within 2 std dev: \" << 100 * perc_two_std_dev << std::endl;\n  // assert_float_rel_near(0.6827, perc_one_std_dev, 0.001);\n  // assert_float_rel_near(0.9545, perc_two_std_dev, 0.001);\n}\n\nTEST(ColoredNoise, DISABLED_checkRedNoise)\n{\n  int NUM_TIMESTEPS = 50000;\n  int NUM_ROLLOUTS = 1;\n  int CONTROL_DIM = 1;\n  std::vector<float> exponents(CONTROL_DIM, 2.0);\n  int full_buffer_size = NUM_ROLLOUTS * NUM_TIMESTEPS * CONTROL_DIM;\n  float* colored_noise_d;\n  float colored_noise_output[full_buffer_size] = { 0 };\n  HANDLE_ERROR(cudaMalloc((void**)&colored_noise_d, sizeof(float) * full_buffer_size));\n  cudaStream_t stream;\n  curandGenerator_t gen;\n  cudaStreamCreate(&stream);\n  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);\n  curandSetPseudoRandomGeneratorSeed(gen, 42);\n  curandSetStream(gen, stream);\n\n  powerlaw_psd_gaussian(exponents, NUM_TIMESTEPS, NUM_ROLLOUTS, colored_noise_d, 0, gen, 0.0f, stream);\n  HANDLE_ERROR(cudaMemcpyAsync(colored_noise_output, colored_noise_d, sizeof(float) * full_buffer_size,\n                               cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  // Check percentages for 3 standard deviations\n  std::vector<int> num_within_std_dev(3, 0);\n  for (int i = 0; i < full_buffer_size; i++)\n  {\n    for (int j = 0; j < num_within_std_dev.size(); j++)\n    {\n      if (fabsf(colored_noise_output[i]) < j + 1.0)\n      {\n        num_within_std_dev[j]++;\n        break;\n      }\n    }\n  }\n\n  float 
perc_within_n_std_dev[num_within_std_dev.size()];\n  // Percentages from https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule\n  float known_percentages[3] = { 0.6827, 0.9545, 0.9973 };\n  for (int i = 0; i < num_within_std_dev.size(); i++)\n  {\n    perc_within_n_std_dev[i] =\n        std::accumulate(num_within_std_dev.begin(), num_within_std_dev.begin() + i + 1, 0.0) / full_buffer_size;\n    std::cout << \"Percentage within \" << i + 1 << \" std dev: \" << 100 * perc_within_n_std_dev[i] << std::endl;\n    // assert_float_rel_near(known_percentages[i], perc_within_n_std_dev[i], 0.001);\n  }\n}\n\nTEST(ColoredNoise, DISABLED_checkMultiNoise)\n{\n  int NUM_TIMESTEPS = 6000;\n  int NUM_ROLLOUTS = 50;\n  int CONTROL_DIM = 3;\n  std::vector<float> exponents(CONTROL_DIM, 0.0);\n  exponents[1] = 0.5;\n  exponents[2] = 2.0;\n  // exponents[3] = 1.25;\n  // exponents[4] = 0.75;\n  int full_buffer_size = NUM_ROLLOUTS * NUM_TIMESTEPS * CONTROL_DIM;\n  float* colored_noise_d;\n  float colored_noise_output[full_buffer_size] = { 0 };\n  HANDLE_ERROR(cudaMalloc((void**)&colored_noise_d, sizeof(float) * full_buffer_size));\n  cudaStream_t stream;\n  curandGenerator_t gen;\n  cudaStreamCreate(&stream);\n  curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);\n  curandSetPseudoRandomGeneratorSeed(gen, 42);\n  curandSetStream(gen, stream);\n\n  powerlaw_psd_gaussian(exponents, NUM_TIMESTEPS, NUM_ROLLOUTS, colored_noise_d, 0, gen, 0.0f, stream);\n  HANDLE_ERROR(cudaMemcpyAsync(colored_noise_output, colored_noise_d, sizeof(float) * full_buffer_size,\n                               cudaMemcpyDeviceToHost, stream));\n  HANDLE_ERROR(cudaStreamSynchronize(stream));\n\n  // Check percentages for 3 standard deviations\n  std::vector<int> num_within_std_dev(3, 0);\n  float perc_within_n_std_dev[num_within_std_dev.size()];\n  // Percentages from https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule\n  float known_percentages[3] = { 0.6827, 0.9545, 0.9973 };\n  for (int c = 
0; c < CONTROL_DIM; c++)\n  {\n    std::fill(num_within_std_dev.begin(), num_within_std_dev.end(), 0);\n    for (int i = 0; i < NUM_TIMESTEPS * NUM_ROLLOUTS; i++)\n    {\n      for (int j = 0; j < num_within_std_dev.size(); j++)\n      {\n        if (fabsf(colored_noise_output[i * CONTROL_DIM + c]) < j + 1.0)\n        {\n          num_within_std_dev[j]++;\n          break;\n        }\n      }\n    }\n\n    for (int i = 0; i < num_within_std_dev.size(); i++)\n    {\n      perc_within_n_std_dev[i] = std::accumulate(num_within_std_dev.begin(), num_within_std_dev.begin() + i + 1, 0.0) /\n                                 (NUM_ROLLOUTS * NUM_TIMESTEPS);\n      std::cout << \"Colored Noise \" << exponents[c] << \" \";\n      std::cout << \"percent of samples within \" << i + 1 << \" std dev: \" << 100 * perc_within_n_std_dev[i] << std::endl;\n      if (exponents[c] == 0)\n      {\n        assert_float_rel_near(known_percentages[i], perc_within_n_std_dev[i], 0.001);\n      }\n    }\n  }\n}\n"
  },
  {
    "path": "tests/sampling_distributions/gaussian_noise_tests.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/core/mppi_common.cuh>\n#include <mppi/dynamics/linear/linear.cuh>\n#include <mppi/sampling_distributions/colored_noise/colored_noise.cuh>\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n\nnamespace ms = mppi::sampling_distributions;\n\ntemplate <class SAMPLER_T>\nclass GaussianTests : public ::testing::Test\n{\npublic:\n  using SAMPLER_PARAMS_T = typename SAMPLER_T::SAMPLING_PARAMS_T;\n  static const int OUTPUT_DIM = E_INDEX(SAMPLER_T::OutputIndex, NUM_OUTPUTS);\n\n  typedef Eigen::Matrix<float, OUTPUT_DIM, 1> output_array;\n  int num_samples_ = 1000;\n  int num_timesteps_ = 100;\n  int num_distributions_ = 1;\n  int num_verifications_ = 0;\n  int* sampling_indices_d_ = nullptr;\n  int* times_d_ = nullptr;\n  int* distribution_indices_d_ = nullptr;\n  float* outputs_d_ = nullptr;\n  float* controls_d_ = nullptr;\n  float* costs_d_ = nullptr;\n  float lambda_ = 1.0f;\n  float alpha_ = 0.0f;\n\n  std::shared_ptr<SAMPLER_T> sampler_ = nullptr;\n  cudaStream_t stream_ = 0;\n  curandGenerator_t gen_;\n  dim3 thread_block_ = dim3(32, 1, 1);\n\n  std::vector<int> sampled_indices_;\n  std::vector<int> sampled_times_;\n  std::vector<int> sampled_distributions_;\n  std::vector<Eigen::MatrixXf> means_cpu_;\n\n  void SetUp() override\n  {\n    cudaStreamCreate(&stream_);\n    sampler_ = std::make_shared<SAMPLER_T>(stream_);\n    updateSamplerSizes();\n    sampler_->GPUSetup();\n    HANDLE_CURAND_ERROR(curandCreateGenerator(&gen_, CURAND_RNG_PSEUDO_DEFAULT));\n    HANDLE_CURAND_ERROR(curandSetPseudoRandomGeneratorSeed(gen_, 42));\n  }\n\n  void TearDown() override\n  {\n  }\n\n  void updateSamplerSizes()\n  {\n    sampler_->setNumTimesteps(num_timesteps_);\n    sampler_->setNumRollouts(num_samples_);\n    sampler_->setNumDistributions(num_distributions_);\n    means_cpu_.resize(num_distributions_);\n  }\n\n  void setRandomMeans()\n  {\n    for (int i = 0; i < num_distributions_; i++)\n    {\n      
Eigen::MatrixXf mean_i = 5 * Eigen::MatrixXf::Random(SAMPLER_T::CONTROL_DIM, num_timesteps_);\n      sampler_->copyImportanceSamplerToDevice(mean_i.data(), i, false);\n      means_cpu_[i] = mean_i;\n    }\n  }\n};\n\ntemplate <int C_DIM>\nusing GaussianParams3 = ms::GaussianParamsImpl<C_DIM, 3>;\n\ntemplate <class DYN_PARAMS_T>\nclass TestGaussianDistribution\n  : public ms::GaussianDistributionImpl<TestGaussianDistribution<DYN_PARAMS_T>, GaussianParams3, DYN_PARAMS_T>\n{\npublic:\n  using PARENT_CLASS = ms::GaussianDistributionImpl<TestGaussianDistribution, GaussianParams3, DYN_PARAMS_T>;\n  using SAMPLING_PARAMS_T = typename PARENT_CLASS::SAMPLING_PARAMS_T;\n\n  TestGaussianDistribution(cudaStream_t stream = 0) : PARENT_CLASS(stream)\n  {\n  }\n  TestGaussianDistribution(const SAMPLING_PARAMS_T& params, cudaStream_t stream = 0) : PARENT_CLASS(params, stream)\n  {\n  }\n};\n\nusing TYPE_TESTS = ::testing::Types<ms::GaussianDistribution<LinearDynamicsParams<4, 1>>,\n                                    ms::GaussianDistribution<LinearDynamicsParams<4, 2>>,\n                                    TestGaussianDistribution<LinearDynamicsParams<4, 2>>>;\n\nTYPED_TEST_SUITE(GaussianTests, TYPE_TESTS);\n\nTYPED_TEST(GaussianTests, SetNumDistributions)\n{\n  using T = TypeParam;\n  this->num_distributions_ = 3;\n  if (this->num_distributions_ > T::SAMPLING_PARAMS_T::MAX_DISTRIBUTIONS)\n  {\n    EXPECT_THROW(this->sampler_->setNumDistributions(this->num_distributions_), std::out_of_range);\n  }\n  else\n  {\n    this->sampler_->setNumDistributions(this->num_distributions_);\n    auto params = this->sampler_->getParams();\n    EXPECT_EQ(this->num_distributions_, params.num_distributions);\n  }\n}\n\nTYPED_TEST(GaussianTests, CheckSamplesAreGaussian)\n{\n  using T = TypeParam;\n  float std_dev = 2.3f;\n  auto params = this->sampler_->getParams();\n  params.pure_noise_trajectories_percentage = 0.0f;\n  for (int i = 0; i < this->num_distributions_; i++)\n  {\n    for (int j = 0; j 
< T::CONTROL_DIM; j++)\n    {\n      params.std_dev[j + i * T::CONTROL_DIM] = std_dev;\n    }\n  }\n  this->sampler_->setParams(params);\n  this->setRandomMeans();\n  this->setRandomMeans();\n\n  this->sampler_->generateSamples(0, 0, this->gen_, false);\n  std::vector<float> sampled_controls(T::CONTROL_DIM * this->num_timesteps_ * this->num_samples_ *\n                                      this->num_distributions_);\n  HANDLE_ERROR(cudaMemcpyAsync(sampled_controls.data(), this->sampler_->getControlSample(0, 0, 0),\n                               sizeof(float) * this->num_samples_ * this->num_timesteps_ * this->num_distributions_ *\n                                   T::CONTROL_DIM,\n                               cudaMemcpyDeviceToHost, this->stream_));\n  HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n  int k_bins = 3;\n  std::vector<int> binned_counts(k_bins, 0);\n  for (int d = 0; d < this->num_distributions_; d++)\n  {\n    for (int s = 0; s < this->num_samples_; s++)\n    {\n      for (int t = 0; t < this->num_timesteps_; t++)\n      {\n        for (int i = 0; i < T::CONTROL_DIM; i++)\n        {\n          int index = ((d * this->num_samples_ + s) * this->num_timesteps_ + t) * T::CONTROL_DIM + i;\n          float z_score = (sampled_controls[index] - this->means_cpu_[d](i, t)) / std_dev;\n          for (int j = k_bins; j >= 0; j--)\n          {\n            if (fabsf(z_score) < j + 1)\n            {\n              binned_counts[j]++;\n            }\n            else\n            {\n              break;\n            }\n          }\n        }\n      }\n    }\n  }\n\n  // Check how many samples are properly sampled\n  std::vector<double> normalized_bins(k_bins, 0.0f);\n  std::vector<double> sigma_rules(k_bins);\n  for (int i = 0; i < k_bins; i++)\n  {\n    sigma_rules[i] = 0.5 * (erf((i + 1) / sqrt(2.0)) - erf(-(i + 1) / sqrt(2.0)));\n    normalized_bins[i] = (double)binned_counts[i] /\n                         (this->num_distributions_ * 
this->num_samples_ * this->num_timesteps_ * T::CONTROL_DIM);\n    double diff = fabsf(normalized_bins[i] - sigma_rules[i]) / sigma_rules[i];\n    EXPECT_NEAR(diff, 0.0, 1e-3);\n  }\n}\n\nTYPED_TEST(GaussianTests, CheckZeroMeanSamplesAreGaussian)\n{\n  using T = TypeParam;\n  auto params = this->sampler_->getParams();\n  params.pure_noise_trajectories_percentage = 1.0f;\n  float std_dev = 2.3f;\n  for (int i = 0; i < this->num_distributions_; i++)\n  {\n    for (int j = 0; j < T::CONTROL_DIM; j++)\n    {\n      params.std_dev[j + i * T::CONTROL_DIM] = std_dev;\n    }\n  }\n  this->sampler_->setParams(params);\n  this->setRandomMeans();  // should end up doing not effecting the samples\n\n  this->sampler_->generateSamples(0, 0, this->gen_, false);\n  std::vector<float> sampled_controls(T::CONTROL_DIM * this->num_timesteps_ * this->num_samples_ *\n                                      this->num_distributions_);\n  HANDLE_ERROR(cudaMemcpyAsync(sampled_controls.data(), this->sampler_->getControlSample(0, 0, 0),\n                               sizeof(float) * this->num_samples_ * this->num_timesteps_ * this->num_distributions_ *\n                                   T::CONTROL_DIM,\n                               cudaMemcpyDeviceToHost, this->stream_));\n  HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n  int k_bins = 3;\n  std::vector<int> binned_counts(k_bins, 0);\n  for (int d = 0; d < this->num_distributions_; d++)\n  {\n    for (int s = 0; s < this->num_samples_; s++)\n    {\n      for (int t = 0; t < this->num_timesteps_; t++)\n      {\n        for (int i = 0; i < T::CONTROL_DIM; i++)\n        {\n          int index = ((d * this->num_samples_ + s) * this->num_timesteps_ + t) * T::CONTROL_DIM + i;\n          float z_score = (sampled_controls[index]) / std_dev;\n          for (int j = k_bins; j >= 0; j--)\n          {\n            if (fabsf(z_score) < j + 1)\n            {\n              binned_counts[j]++;\n            }\n            else\n            {\n       
       break;\n            }\n          }\n        }\n      }\n    }\n  }\n\n  // Check how many samples are properly sampled\n  std::vector<double> normalized_bins(k_bins, 0.0f);\n  std::vector<double> sigma_rules(k_bins);\n  for (int i = 0; i < k_bins; i++)\n  {\n    sigma_rules[i] = 0.5 * (erf((i + 1) / sqrt(2.0)) - erf(-(i + 1) / sqrt(2.0)));\n    normalized_bins[i] = (double)binned_counts[i] /\n                         (this->num_distributions_ * this->num_samples_ * this->num_timesteps_ * T::CONTROL_DIM);\n    double diff = fabsf(normalized_bins[i] - sigma_rules[i]) / sigma_rules[i];\n    EXPECT_NEAR(diff, 0.0, 1e-3);\n  }\n}\n"
  },
  {
    "path": "tests/sampling_distributions/generic_sampling_distribution_tests.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/core/mppi_common.cuh>\n#include <mppi/ddp/util.h>\n#include <mppi/dynamics/linear/linear.cuh>\n#include <mppi/sampling_distributions/colored_noise/colored_noise.cuh>\n#include <mppi/sampling_distributions/gaussian/gaussian.cuh>\n#include <mppi/sampling_distributions/nln/nln.cuh>\n#include <mppi/utils/math_utils.h>\n#include <mppi/utils/type_printing.h>\n\n#if __cplusplus < 201703L  // std::void_t is a C++17 feature, used for inherited_from_gaussian struct\nnamespace std\n{\ntemplate <typename... Ts>\nstruct make_void\n{\n  typedef void type;\n};\n\ntemplate <typename... Ts>\nusing void_t = typename make_void<Ts...>::type;\n}  // namespace std\n#endif\n\nnamespace ms = mppi::sampling_distributions;\n\ntemplate <class SAMPLER_T>\n__global__ void getControlSamplesKernel(SAMPLER_T* __restrict__ sampler, const int num_control_samples,\n                                        const int* __restrict__ sample_indexes_d, const int* __restrict__ times_d,\n                                        const int* __restrict__ distribution_indexes_d,\n                                        const float* __restrict__ outputs_d, float* __restrict__ control_samples_d)\n{\n  const int size_of_theta_d_bytes = mppi::kernels::calcClassSharedMemSize(sampler, blockDim);\n  using OutputIndex = typename SAMPLER_T::OutputIndex;\n  const int OUTPUT_DIM = E_INDEX(OutputIndex, NUM_OUTPUTS);\n  extern __shared__ float entire_buffer[];\n  float* theta_d_shared = &entire_buffer[0 / sizeof(float)];\n  float* outputs_shared = &entire_buffer[size_of_theta_d_bytes / sizeof(float)];  // THREAD_BLOCK_X * OUTPUT_DIM in size\n\n  int x_index, x_step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_X>(x_index, x_step);\n  int y_index, y_step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(y_index, y_step);\n  int test_index = threadIdx.x + blockDim.x * blockIdx.x;\n  // load output into shared memory\n  if (test_index < 
num_control_samples)\n  {\n    for (int j = y_index; j < OUTPUT_DIM; j += y_step)\n    {\n      outputs_shared[j + x_index * OUTPUT_DIM] = outputs_d[j + test_index * OUTPUT_DIM];\n    }\n  }\n  __syncthreads();\n  float* output = &outputs_shared[x_index * OUTPUT_DIM];\n  // initialize sampling distributions\n  sampler->initializeDistributions(output, 0.0f, 0.01f, theta_d_shared);\n  __syncthreads();\n  // get control samples\n  if (test_index < num_control_samples)\n  {\n    int sample_index = sample_indexes_d[test_index];\n    int t = times_d[test_index];\n    int distribution_idx = distribution_indexes_d[test_index];\n    float* u_to_save_to = &control_samples_d[test_index * SAMPLER_T::CONTROL_DIM];\n    sampler->readControlSample(sample_index, t, distribution_idx, u_to_save_to, theta_d_shared, y_step, y_index,\n                               output);\n  }\n}\n\ntemplate <class SAMPLER_T>\n__global__ void getLikelihoodCostKernel(SAMPLER_T* __restrict__ sampler, const int num_control_samples, float lambda,\n                                        float alpha, const int* __restrict__ sample_indexes_d,\n                                        const int* __restrict__ times_d, const int* __restrict__ distribution_indexes_d,\n                                        const float* __restrict__ outputs_d, float* __restrict__ control_samples_d,\n                                        float* __restrict__ costs_d)\n{\n  const int size_of_theta_d_bytes = mppi::kernels::calcClassSharedMemSize(sampler, blockDim);\n  using OutputIndex = typename SAMPLER_T::OutputIndex;\n  const int OUTPUT_DIM = E_INDEX(OutputIndex, NUM_OUTPUTS);\n  extern __shared__ float entire_buffer[];\n  float* theta_d_shared = &entire_buffer[0 / sizeof(float)];\n  float* outputs_shared =\n      &theta_d_shared[size_of_theta_d_bytes / sizeof(float)];  // THREAD_BLOCK_X * OUTPUT_DIM in size\n  float* running_cost_shared = &outputs_shared[blockDim.x * OUTPUT_DIM];\n  int running_cost_index = threadIdx.x + 
blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);\n  float* running_cost = &running_cost_shared[running_cost_index];\n  running_cost[0] = 0.0f;\n\n  int x_index, x_step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_X>(x_index, x_step);\n  int y_index, y_step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(y_index, y_step);\n  int test_index = threadIdx.x + blockDim.x * blockIdx.x;\n  // load output into shared memory\n  if (test_index < num_control_samples)\n  {\n    for (int j = y_index; j < OUTPUT_DIM; j += y_step)\n    {\n      outputs_shared[j + x_index * OUTPUT_DIM] = outputs_d[j + test_index * OUTPUT_DIM];\n    }\n  }\n  __syncthreads();\n  float* output = &outputs_shared[x_index * OUTPUT_DIM];\n  // initialize sampling distributions\n  sampler->initializeDistributions(output, 0.0f, 0.01f, theta_d_shared);\n  __syncthreads();\n  // get control samples\n  if (test_index < num_control_samples)\n  {\n    int sample_index = sample_indexes_d[test_index];\n    int t = times_d[test_index];\n    int distribution_idx = distribution_indexes_d[test_index];\n    float* u_to_save_to = &control_samples_d[test_index * SAMPLER_T::CONTROL_DIM];\n    sampler->readControlSample(sample_index, t, distribution_idx, u_to_save_to, theta_d_shared, y_step, y_index,\n                               output);\n  }\n  __syncthreads();\n  // Calculate likelihood ratio cost\n  if (test_index < num_control_samples)\n  {\n    int sample_index = sample_indexes_d[test_index];\n    int t = times_d[test_index];\n    int distribution_idx = distribution_indexes_d[test_index];\n    float* u = &control_samples_d[test_index * SAMPLER_T::CONTROL_DIM];\n    running_cost[0] =\n        sampler->computeLikelihoodRatioCost(u, theta_d_shared, sample_index, t, distribution_idx, lambda, alpha);\n  }\n  // __syncthreads();\n  // if (threadIdx.x == 1 && threadIdx.y + threadIdx.z == 0 && blockIdx.x == 0)\n  // {\n  //   printf(\"Running costs:\\n\");\n  //   for (int i = 
0; i < blockDim.x; i++)\n  //   {\n  //     float cost_sum = 0.0f;\n  //     for (int j = 0; j < blockDim.y; j++)\n  //     {\n  //       cost_sum += running_cost_shared[i + j * blockDim.x];\n  //       printf(\"(%2d, %2d): %6.3f\\n\", i, j, running_cost_shared[i + j * blockDim.x]);\n  //     }\n  //     printf(\"Sum of y dim for %2d: %8.5f\\n\", i, cost_sum);\n  //   }\n  // }\n  running_cost = &running_cost_shared[blockDim.x * blockDim.y * threadIdx.z];\n  __syncthreads();\n  mppi::kernels::costArrayReduction(&running_cost[x_index], blockDim.y, threadIdx.y, blockDim.y, threadIdx.y == 0,\n                                    blockDim.x);\n  if (test_index < num_control_samples)\n  {\n    costs_d[test_index] = running_cost[x_index];\n  }\n}\n\ntemplate <class SAMPLER_T>\n__global__ void getFeedbackCostKernel(SAMPLER_T* __restrict__ sampler, const int num_control_samples, float lambda,\n                                      float alpha, const int* __restrict__ sample_indexes_d,\n                                      const int* __restrict__ times_d, const int* __restrict__ distribution_indexes_d,\n                                      const float* __restrict__ outputs_d, float* __restrict__ control_samples_d,\n                                      float* __restrict__ costs_d)\n{\n  const int size_of_theta_d_bytes = mppi::kernels::calcClassSharedMemSize(sampler, blockDim);\n  using OutputIndex = typename SAMPLER_T::OutputIndex;\n  const int OUTPUT_DIM = E_INDEX(OutputIndex, NUM_OUTPUTS);\n  extern __shared__ float entire_buffer[];\n  float* theta_d_shared = &entire_buffer[0 / sizeof(float)];\n  float* outputs_shared =\n      &theta_d_shared[size_of_theta_d_bytes / sizeof(float)];  // THREAD_BLOCK_X * OUTPUT_DIM in size\n  float* running_cost_shared = &outputs_shared[blockDim.x * OUTPUT_DIM];\n  int running_cost_index = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);\n  float* running_cost = &running_cost_shared[running_cost_index];\n  
running_cost[0] = 0.0f;\n\n  int x_index, x_step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_X>(x_index, x_step);\n  int y_index, y_step;\n  mppi::p1::getParallel1DIndex<mppi::p1::Parallel1Dir::THREAD_Y>(y_index, y_step);\n  int test_index = threadIdx.x + blockDim.x * blockIdx.x;\n  // load output into shared memory\n  if (test_index < num_control_samples)\n  {\n    for (int j = y_index; j < OUTPUT_DIM; j += y_step)\n    {\n      outputs_shared[j + x_index * OUTPUT_DIM] = outputs_d[j + test_index * OUTPUT_DIM];\n    }\n  }\n  __syncthreads();\n  float* output = &outputs_shared[x_index * OUTPUT_DIM];\n  // initialize sampling distributions\n  sampler->initializeDistributions(output, 0.0f, 0.01f, theta_d_shared);\n  __syncthreads();\n  // get control samples\n  if (test_index < num_control_samples)\n  {\n    int sample_index = sample_indexes_d[test_index];\n    int t = times_d[test_index];\n    int distribution_idx = distribution_indexes_d[test_index];\n    float* u_to_save_to = &control_samples_d[test_index * SAMPLER_T::CONTROL_DIM];\n    sampler->readControlSample(sample_index, t, distribution_idx, u_to_save_to, theta_d_shared, y_step, y_index,\n                               output);\n  }\n  __syncthreads();\n  // Calculate Feedback Control cost using random control samples from distribution\n  if (test_index < num_control_samples)\n  {\n    int sample_index = sample_indexes_d[test_index];\n    int t = times_d[test_index];\n    int distribution_idx = distribution_indexes_d[test_index];\n    float* u = &control_samples_d[test_index * SAMPLER_T::CONTROL_DIM];\n    running_cost[0] = sampler->computeFeedbackCost(u, theta_d_shared, t, distribution_idx, lambda, alpha);\n  }\n  running_cost = &running_cost_shared[blockDim.x * blockDim.y * threadIdx.z];\n  __syncthreads();\n  mppi::kernels::costArrayReduction(&running_cost[x_index], blockDim.y, threadIdx.y, blockDim.y, threadIdx.y == 0,\n                                    blockDim.x);\n  if 
(test_index < num_control_samples)\n  {\n    costs_d[test_index] = running_cost[x_index];\n  }\n}\n\ntemplate <class SAMPLER_T>\nclass SamplingDistributionTests : public ::testing::Test\n{\npublic:\n  using SAMPLER_PARAMS_T = typename SAMPLER_T::SAMPLING_PARAMS_T;\n  static const int OUTPUT_DIM = E_INDEX(SAMPLER_T::OutputIndex, NUM_OUTPUTS);\n  typedef Eigen::Matrix<float, OUTPUT_DIM, 1> output_array;\n  int num_samples_ = 1000;\n  int num_timesteps_ = 100;\n  int num_distributions_ = 1;\n  int num_verifications_ = 0;\n  int* sampling_indices_d_ = nullptr;\n  int* times_d_ = nullptr;\n  int* distribution_indices_d_ = nullptr;\n  float* outputs_d_ = nullptr;\n  float* controls_d_ = nullptr;\n  float* costs_d_ = nullptr;\n  float lambda_ = 1.0f;\n  float alpha_ = 0.0f;\n\n  std::shared_ptr<SAMPLER_T> sampler_ = nullptr;\n  cudaStream_t stream_ = 0;\n  curandGenerator_t gen_;\n  dim3 thread_block_ = dim3(32, 2, 1);\n\n  std::vector<int> sampled_indices_;\n  std::vector<int> sampled_times_;\n  std::vector<int> sampled_distributions_;\n  util::EigenAlignedVector<float, OUTPUT_DIM, 1> sampled_outputs_;\n  util::EigenAlignedVector<float, SAMPLER_T::CONTROL_DIM, 1> sampled_controls_cpu_;\n  util::EigenAlignedVector<float, SAMPLER_T::CONTROL_DIM, 1> sampled_controls_gpu_;\n\n  void SetUp() override\n  {\n    cudaStreamCreate(&stream_);\n    sampler_ = std::make_shared<SAMPLER_T>(stream_);\n    sampler_->GPUSetup();\n    setUpCudaMemory(3000);\n    HANDLE_CURAND_ERROR(curandCreateGenerator(&gen_, CURAND_RNG_PSEUDO_DEFAULT));\n    HANDLE_CURAND_ERROR(curandSetPseudoRandomGeneratorSeed(gen_, 42));\n  }\n\n  template <class T>\n  void freeCudaPtr(T*& ptr)\n  {\n    if (ptr != nullptr)\n    {\n      cudaFree(ptr);\n    }\n    ptr = nullptr;\n  }\n\n  void setUpCudaMemory(const int num_verifications)\n  {\n    if (num_verifications > num_verifications_)\n    {\n      freeCudaPtr(outputs_d_);\n      freeCudaPtr(sampling_indices_d_);\n      freeCudaPtr(times_d_);\n      
freeCudaPtr(distribution_indices_d_);\n      freeCudaPtr(controls_d_);\n      freeCudaPtr(costs_d_);\n\n      // Allocate GPU memory\n      HANDLE_ERROR(cudaMalloc((void**)&sampling_indices_d_, sizeof(int) * num_verifications));\n      HANDLE_ERROR(cudaMalloc((void**)&times_d_, sizeof(int) * num_verifications));\n      HANDLE_ERROR(cudaMalloc((void**)&distribution_indices_d_, sizeof(int) * num_verifications));\n      HANDLE_ERROR(cudaMalloc((void**)&outputs_d_, sizeof(float) * OUTPUT_DIM * num_verifications));\n      HANDLE_ERROR(cudaMalloc((void**)&controls_d_, sizeof(float) * SAMPLER_T::CONTROL_DIM * num_verifications));\n      HANDLE_ERROR(cudaMalloc((void**)&costs_d_, sizeof(float) * num_verifications));\n\n      num_verifications_ = num_verifications;\n    }\n  }\n\n  void generateNewSamples()\n  {\n    // Create sample index\n    std::vector<int> samples =\n        mppi::math::sample_without_replacement(num_verifications_, num_timesteps_ * num_samples_ * num_distributions_);\n\n    // Fill in sampled indices and copy to GPU\n    sampled_indices_.resize(num_verifications_);\n    sampled_times_.resize(num_verifications_);\n    sampled_distributions_.resize(num_verifications_);\n    sampled_outputs_.resize(num_verifications_);\n    for (int i = 0; i < num_verifications_; i++)\n    {\n      const int sample = samples[i];\n      sampled_indices_[i] = (sample / (num_timesteps_ * num_distributions_)) % num_samples_;\n      sampled_times_[i] = (sample / num_distributions_) % num_timesteps_;\n      sampled_distributions_[i] = sample % num_distributions_;\n      sampled_outputs_[i] = output_array::Random();\n      HANDLE_ERROR(cudaMemcpyAsync(outputs_d_ + i * OUTPUT_DIM, sampled_outputs_[i].data(), sizeof(float) * OUTPUT_DIM,\n                                   cudaMemcpyHostToDevice, stream_));\n    }\n    HANDLE_ERROR(cudaMemcpyAsync(sampling_indices_d_, sampled_indices_.data(), sizeof(int) * num_verifications_,\n                                 
cudaMemcpyHostToDevice, stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(times_d_, sampled_times_.data(), sizeof(int) * num_verifications_,\n                                 cudaMemcpyHostToDevice, stream_));\n    HANDLE_ERROR(cudaMemcpyAsync(distribution_indices_d_, sampled_distributions_.data(),\n                                 sizeof(int) * num_verifications_, cudaMemcpyHostToDevice, stream_));\n\n    // Create non-zero mean\n    Eigen::MatrixXf random_mean = Eigen::MatrixXf::Random(SAMPLER_T::CONTROL_DIM, num_timesteps_);\n    for (int i = 0; i < num_distributions_; i++)\n    {\n      sampler_->copyImportanceSamplerToDevice(random_mean.data(), i, false);\n    }\n    float mean_update = 1.0f;\n    sampled_controls_cpu_.resize(num_verifications_);\n    sampled_controls_gpu_.resize(num_verifications_);\n    sampler_->generateSamples(0, 0, gen_, false);\n    HANDLE_ERROR(cudaMemcpyAsync(costs_d_, &mean_update, sizeof(float), cudaMemcpyHostToDevice, stream_));\n    for (int i = 0; i < num_distributions_; i++)\n    {\n      sampler_->updateDistributionParamsFromDevice(costs_d_, 1.0f, i, false);\n    }\n    HANDLE_ERROR(cudaStreamSynchronize(stream_));\n  }\n\n  void updateSampler()\n  {\n    sampler_->setNumTimesteps(num_timesteps_);\n    sampler_->setNumRollouts(num_samples_);\n    sampler_->setNumDistributions(num_distributions_);\n  }\n\n  void TearDown() override\n  {\n    HANDLE_ERROR(cudaStreamSynchronize(stream_));\n    freeCudaPtr(sampling_indices_d_);\n    freeCudaPtr(times_d_);\n    freeCudaPtr(distribution_indices_d_);\n    freeCudaPtr(outputs_d_);\n    freeCudaPtr(controls_d_);\n    freeCudaPtr(costs_d_);\n  }\n};\n\n/**\n * Special setup for Gaussian-based distributions\n **/\ntemplate <class T, typename = void>\nstruct inherited_from_gaussian : std::false_type\n{\npublic:\n  void operator()(std::shared_ptr<T> sampler) const\n  {  // Empty setup\n  }\n};\n\ntemplate <class T>\nstruct inherited_from_gaussian<\n    T, 
std::void_t<decltype(T::SAMPLING_PARAMS_T::MAX_DISTRIBUTIONS),\n                   typename std::enable_if<\n                       std::is_base_of<ms::GaussianParamsImpl<T::CONTROL_DIM, T::SAMPLING_PARAMS_T::MAX_DISTRIBUTIONS>,\n                                       typename T::SAMPLING_PARAMS_T>::value,\n                       bool>::type>> : std::true_type\n{\npublic:\n  void operator()(std::shared_ptr<T> sampler) const\n  {  // Setup std dev and cost coefficients\n    auto params = sampler->getParams();\n    for (int i = 0; i < T::CONTROL_DIM; i++)\n    {\n      params.control_cost_coeff[i] = 1.0f;\n    }\n    for (int dist_i = 0; dist_i < params.num_distributions; dist_i++)\n    {\n      for (int std_dev_i = 0; std_dev_i < T::CONTROL_DIM; std_dev_i++)\n      {\n        params.std_dev[std_dev_i + dist_i * T::CONTROL_DIM] = 2.0f;\n      }\n    }\n    sampler->setParams(params);\n  }\n};\n\nusing TYPE_TESTS = ::testing::Types<\n    ms::GaussianDistribution<LinearDynamicsParams<4, 1>>, ms::GaussianDistribution<LinearDynamicsParams<4, 2>>,\n    ms::GaussianDistribution<LinearDynamicsParams<4, 4>>, ms::GaussianDistribution<LinearDynamicsParams<4, 3>>,\n    ms::GaussianDistribution<LinearDynamicsParams<1, 7>>, ms::ColoredNoiseDistribution<LinearDynamicsParams<4, 1>>,\n    ms::ColoredNoiseDistribution<LinearDynamicsParams<4, 2>>, ms::ColoredNoiseDistribution<LinearDynamicsParams<4, 4>>,\n    ms::ColoredNoiseDistribution<LinearDynamicsParams<3, 3>>, ms::NLNDistribution<LinearDynamicsParams<4, 1>>,\n    ms::NLNDistribution<LinearDynamicsParams<4, 2>>, ms::NLNDistribution<LinearDynamicsParams<4, 4>>,\n    ms::NLNDistribution<LinearDynamicsParams<3, 3>>>;\n\nTYPED_TEST_SUITE(SamplingDistributionTests, TYPE_TESTS);\n\nTYPED_TEST(SamplingDistributionTests, TestCreation)\n{\n  using T = TypeParam;\n  EXPECT_TRUE(true);\n  // testMethod<T, inherited_from_gaussian<T>>();\n  inherited_from_gaussian<T>()(this->sampler_);\n}\n\nTYPED_TEST(SamplingDistributionTests, 
SetNumDistributions)\n{\n  using T = TypeParam;\n  this->num_distributions_ = 2;\n  this->updateSampler();\n  this->generateNewSamples();\n  EXPECT_TRUE(true);\n}\n\nTYPED_TEST(SamplingDistributionTests, ReadControlsFromGPU)\n{\n  using T = TypeParam;\n  this->updateSampler();\n  this->generateNewSamples();\n\n  dim3 grid_dim(mppi::math::int_ceil(this->num_verifications_, this->thread_block_.x), 1, 1);\n  std::size_t shared_mem_bytes = mppi::kernels::calcClassSharedMemSize<T>(this->sampler_.get(), this->thread_block_) +\n                                 sizeof(float) * this->OUTPUT_DIM * this->thread_block_.x;\n\n  getControlSamplesKernel<T><<<grid_dim, this->thread_block_, shared_mem_bytes, this->stream_>>>(\n      this->sampler_->sampling_d_, this->num_verifications_, this->sampling_indices_d_, this->times_d_,\n      this->distribution_indices_d_, this->outputs_d_, this->controls_d_);\n  HANDLE_ERROR(cudaGetLastError());\n  // Copy back to CPU\n  for (int i = 0; i < this->num_verifications_; i++)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_controls_gpu_[i].data(), this->controls_d_ + i * T::CONTROL_DIM,\n                                 sizeof(float) * T::CONTROL_DIM, cudaMemcpyDeviceToHost, this->stream_));\n    // Query on CPU\n    float* u_i =\n        this->sampler_->getControlSample(this->sampled_indices_[i], this->sampled_times_[i],\n                                         this->sampled_distributions_[i], nullptr, this->sampled_outputs_[i].data());\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_controls_cpu_[i].data(), u_i, sizeof(float) * T::CONTROL_DIM,\n                                 cudaMemcpyDeviceToHost, this->stream_));\n  }\n  HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n  for (int i = 0; i < this->num_verifications_; i++)\n  {\n    float diff = (this->sampled_controls_cpu_[i] - this->sampled_controls_gpu_[i]).array().abs().sum();\n    ASSERT_FLOAT_EQ(diff, 0.0f);\n  }\n}\n\nTYPED_TEST(SamplingDistributionTests, 
ReadControlsFromGPUMoreDistributions)\n{\n  using T = TypeParam;\n  this->num_distributions_ = 2;\n  this->updateSampler();\n  this->generateNewSamples();\n\n  dim3 grid_dim(mppi::math::int_ceil(this->num_verifications_, this->thread_block_.x), 1, 1);\n  std::size_t shared_mem_bytes = mppi::kernels::calcClassSharedMemSize<T>(this->sampler_.get(), this->thread_block_) +\n                                 sizeof(float) * this->OUTPUT_DIM * this->thread_block_.x;\n\n  getControlSamplesKernel<T><<<grid_dim, this->thread_block_, shared_mem_bytes, this->stream_>>>(\n      this->sampler_->sampling_d_, this->num_verifications_, this->sampling_indices_d_, this->times_d_,\n      this->distribution_indices_d_, this->outputs_d_, this->controls_d_);\n  HANDLE_ERROR(cudaGetLastError());\n  // Copy back to CPU\n  for (int i = 0; i < this->num_verifications_; i++)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_controls_gpu_[i].data(), this->controls_d_ + i * T::CONTROL_DIM,\n                                 sizeof(float) * T::CONTROL_DIM, cudaMemcpyDeviceToHost, this->stream_));\n    // Query on CPU\n    float* u_i =\n        this->sampler_->getControlSample(this->sampled_indices_[i], this->sampled_times_[i],\n                                         this->sampled_distributions_[i], nullptr, this->sampled_outputs_[i].data());\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_controls_cpu_[i].data(), u_i, sizeof(float) * T::CONTROL_DIM,\n                                 cudaMemcpyDeviceToHost, this->stream_));\n  }\n  HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n  for (int i = 0; i < this->num_verifications_; i++)\n  {\n    float diff = (this->sampled_controls_cpu_[i] - this->sampled_controls_gpu_[i]).array().abs().sum();\n    ASSERT_FLOAT_EQ(diff, 0.0f);\n  }\n}\n\nTYPED_TEST(SamplingDistributionTests, ReadControlsFromGPUDifferentNoiseForDistributions)\n{\n  using T = TypeParam;\n  this->num_distributions_ = 2;\n  auto params = this->sampler_->getParams();\n  
params.use_same_noise_for_all_distributions = false;\n  this->sampler_->setParams(params);\n  this->updateSampler();\n  this->generateNewSamples();\n\n  dim3 grid_dim(mppi::math::int_ceil(this->num_verifications_, this->thread_block_.x), 1, 1);\n  std::size_t shared_mem_bytes = mppi::kernels::calcClassSharedMemSize<T>(this->sampler_.get(), this->thread_block_) +\n                                 sizeof(float) * this->OUTPUT_DIM * this->thread_block_.x;\n\n  getControlSamplesKernel<T><<<grid_dim, this->thread_block_, shared_mem_bytes, this->stream_>>>(\n      this->sampler_->sampling_d_, this->num_verifications_, this->sampling_indices_d_, this->times_d_,\n      this->distribution_indices_d_, this->outputs_d_, this->controls_d_);\n  HANDLE_ERROR(cudaGetLastError());\n  // Copy back to CPU\n  for (int i = 0; i < this->num_verifications_; i++)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_controls_gpu_[i].data(), this->controls_d_ + i * T::CONTROL_DIM,\n                                 sizeof(float) * T::CONTROL_DIM, cudaMemcpyDeviceToHost, this->stream_));\n    // Query on CPU\n    float* u_i =\n        this->sampler_->getControlSample(this->sampled_indices_[i], this->sampled_times_[i],\n                                         this->sampled_distributions_[i], nullptr, this->sampled_outputs_[i].data());\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_controls_cpu_[i].data(), u_i, sizeof(float) * T::CONTROL_DIM,\n                                 cudaMemcpyDeviceToHost, this->stream_));\n  }\n  HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n  for (int i = 0; i < this->num_verifications_; i++)\n  {\n    float diff = (this->sampled_controls_cpu_[i] - this->sampled_controls_gpu_[i]).array().abs().sum();\n    ASSERT_FLOAT_EQ(diff, 0.0f);\n  }\n}\n\nTYPED_TEST(SamplingDistributionTests, CompareLikelihoodRatioCostsCPUvsGPU)\n{\n  using T = TypeParam;\n  this->num_distributions_ = 2;\n  auto params = this->sampler_->getParams();\n  
params.use_same_noise_for_all_distributions = false;\n  this->sampler_->setParams(params);\n  inherited_from_gaussian<T>()(this->sampler_);\n  this->updateSampler();\n  this->generateNewSamples();\n\n  dim3 grid_dim(mppi::math::int_ceil(this->num_verifications_, this->thread_block_.x), 1, 1);\n  std::size_t shared_mem_bytes =\n      mppi::kernels::calcClassSharedMemSize<T>(this->sampler_.get(), this->thread_block_) +\n      sizeof(float) * (this->OUTPUT_DIM * this->thread_block_.x +\n                       this->thread_block_.x * this->thread_block_.y * this->thread_block_.z);\n\n  getLikelihoodCostKernel<T><<<grid_dim, this->thread_block_, shared_mem_bytes, this->stream_>>>(\n      this->sampler_->sampling_d_, this->num_verifications_, this->lambda_, this->alpha_, this->sampling_indices_d_,\n      this->times_d_, this->distribution_indices_d_, this->outputs_d_, this->controls_d_, this->costs_d_);\n  HANDLE_ERROR(cudaGetLastError());\n\n  std::vector<float> costs_gpu(this->num_verifications_);\n  // Copy back to CPU\n  for (int i = 0; i < this->num_verifications_; i++)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_controls_gpu_[i].data(), this->controls_d_ + i * T::CONTROL_DIM,\n                                 sizeof(float) * T::CONTROL_DIM, cudaMemcpyDeviceToHost, this->stream_));\n    // Query on CPU\n    float* u_i =\n        this->sampler_->getControlSample(this->sampled_indices_[i], this->sampled_times_[i],\n                                         this->sampled_distributions_[i], nullptr, this->sampled_outputs_[i].data());\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_controls_cpu_[i].data(), u_i, sizeof(float) * T::CONTROL_DIM,\n                                 cudaMemcpyDeviceToHost, this->stream_));\n  }\n  HANDLE_ERROR(cudaMemcpyAsync(costs_gpu.data(), this->costs_d_, sizeof(float) * this->num_verifications_,\n                               cudaMemcpyDeviceToHost, this->stream_));\n  HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n  float 
cost;\n  for (int i = 2; i < this->num_verifications_; i++)\n  {\n    cost = 0.0f;\n    cost = this->sampler_->computeLikelihoodRatioCost(this->sampled_controls_cpu_[i], this->sampled_indices_[i],\n                                                      this->sampled_times_[i], this->sampled_distributions_[i],\n                                                      this->lambda_, this->alpha_);\n    ASSERT_NEAR(cost, costs_gpu[i], fabsf(cost) * 5e-5)\n        << \" failed on sample \" << this->sampled_indices_[i] << \", time \" << this->sampled_times_[i]\n        << \", dist: \" << this->sampled_distributions_[i];\n  }\n}\n\nTYPED_TEST(SamplingDistributionTests, CompareFeedbackCostsCPUvsGPU)\n{\n  using T = TypeParam;\n  this->num_distributions_ = 2;\n  auto params = this->sampler_->getParams();\n  params.use_same_noise_for_all_distributions = false;\n  this->sampler_->setParams(params);\n  inherited_from_gaussian<T>()(this->sampler_);\n  this->updateSampler();\n  this->generateNewSamples();\n\n  dim3 grid_dim(mppi::math::int_ceil(this->num_verifications_, this->thread_block_.x), 1, 1);\n  std::size_t shared_mem_bytes =\n      mppi::kernels::calcClassSharedMemSize<T>(this->sampler_.get(), this->thread_block_) +\n      sizeof(float) * (this->OUTPUT_DIM * this->thread_block_.x +\n                       this->thread_block_.x * this->thread_block_.y * this->thread_block_.z);\n\n  getFeedbackCostKernel<T><<<grid_dim, this->thread_block_, shared_mem_bytes, this->stream_>>>(\n      this->sampler_->sampling_d_, this->num_verifications_, this->lambda_, this->alpha_, this->sampling_indices_d_,\n      this->times_d_, this->distribution_indices_d_, this->outputs_d_, this->controls_d_, this->costs_d_);\n  HANDLE_ERROR(cudaGetLastError());\n\n  std::vector<float> costs_gpu(this->num_verifications_);\n  // Copy back to CPU\n  for (int i = 0; i < this->num_verifications_; i++)\n  {\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_controls_gpu_[i].data(), this->controls_d_ + i * 
T::CONTROL_DIM,\n                                 sizeof(float) * T::CONTROL_DIM, cudaMemcpyDeviceToHost, this->stream_));\n    // Query on CPU\n    float* u_i =\n        this->sampler_->getControlSample(this->sampled_indices_[i], this->sampled_times_[i],\n                                         this->sampled_distributions_[i], nullptr, this->sampled_outputs_[i].data());\n    HANDLE_ERROR(cudaMemcpyAsync(this->sampled_controls_cpu_[i].data(), u_i, sizeof(float) * T::CONTROL_DIM,\n                                 cudaMemcpyDeviceToHost, this->stream_));\n  }\n  HANDLE_ERROR(cudaMemcpyAsync(costs_gpu.data(), this->costs_d_, sizeof(float) * this->num_verifications_,\n                               cudaMemcpyDeviceToHost, this->stream_));\n  HANDLE_ERROR(cudaStreamSynchronize(this->stream_));\n\n  float cost;\n  for (int i = 0; i < this->num_verifications_; i++)\n  {\n    cost = 0.0f;\n    cost = this->sampler_->computeFeedbackCost(this->sampled_controls_cpu_[i], this->sampled_times_[i],\n                                               this->sampled_distributions_[i], this->lambda_, this->alpha_);\n    ASSERT_FLOAT_EQ(cost, costs_gpu[i]) << \" failed on sample \" << this->sampled_indices_[i] << \", time \"\n                                        << this->sampled_times_[i] << \", dist: \" << this->sampled_distributions_[i];\n  }\n}\n"
  },
  {
    "path": "tests/shaping_functions/CMakeLists.txt",
    "content": "# Rollout kernel test suite\nset(SHAPING_FUNCTION_TARGET_NAME shaping_function_tests)\n\nadd_executable(${SHAPING_FUNCTION_TARGET_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp shaping_function_test.cu)\ntarget_link_libraries(${SHAPING_FUNCTION_TARGET_NAME} gtest\n        gmock\n        gtest_main\n        ${MPPI_HEADER_LIBRARY_NAME})\n\ngtest_discover_tests(${SHAPING_FUNCTION_TARGET_NAME})\nset_target_properties(${SHAPING_FUNCTION_TARGET_NAME} PROPERTIES FOLDER test)\n\n# Rollout kernel test suite\nset(CEM_SHAPING_FUNCTION_TARGET_NAME cem_shaping_function_tests)\n\nadd_executable(${CEM_SHAPING_FUNCTION_TARGET_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp cem_shaping_function_test.cu)\ntarget_link_libraries(${CEM_SHAPING_FUNCTION_TARGET_NAME} gtest\n        gmock\n        gtest_main\n        ${MPPI_HEADER_LIBRARY_NAME})\n\ngtest_discover_tests(${CEM_SHAPING_FUNCTION_TARGET_NAME})\nset_target_properties(${CEM_SHAPING_FUNCTION_TARGET_NAME} PROPERTIES FOLDER test)\n"
  },
  {
    "path": "tests/shaping_functions/cem_shaping_function_test.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/dynamics/cartpole/cartpole_dynamics.cuh>\n#include <mppi/cost_functions/cartpole/cartpole_quadratic_cost.cuh>\n\n#include <mppi/utils/test_helper.h>\n#include <random>\n#include <algorithm>\n\n#include <mppi/shaping_functions/CEM/cem_shaping_function.cuh>\n#include <kernel_tests/shaping_functions/shaping_function_kernels_tests.cuh>\n\nclass CEMShapingFunctionTest : public testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    generator = std::default_random_engine(7.0);\n    distribution = std::normal_distribution<float>(100.0, 2.0);\n  }\n\n  void TearDown() override\n  {\n  }\n\n  std::default_random_engine generator;\n  std::normal_distribution<float> distribution;\n};\n\nTEST_F(CEMShapingFunctionTest, computeWeightTest)\n{\n  const int num_rollouts = 500;\n  std::array<float, num_rollouts> cost_vec = { 0 };\n  CEMShapingFunctionParams params;\n  CEMShapingFunction<num_rollouts, 1> shaping_function;\n\n  // Use a range based for loop to set the cost\n  for (auto& cost : cost_vec)\n  {\n    cost = distribution(generator);\n  }\n\n  float target_cost = 99;\n\n  EXPECT_FLOAT_EQ(shaping_function.getNormalizer(), 1.0);\n\n  for (float gamma = 0.0; gamma <= 1.0; gamma += 0.1)\n  {\n    params.gamma = gamma;\n    shaping_function.setParams(params);\n\n    // int index = (int) num_rollouts*gamma;\n    for (int i = 0; i < cost_vec.size(); i++)\n    {\n      float weight = shaping_function.computeWeight(cost_vec.data(), target_cost, i);\n      if (cost_vec[i] > target_cost)\n      {\n        EXPECT_FLOAT_EQ(weight, 1.0 / ((int)num_rollouts * params.gamma + 1));\n      }\n      else\n      {\n        EXPECT_FLOAT_EQ(weight, 0.0);\n      }\n    }\n  }\n}\n\nTEST_F(CEMShapingFunctionTest, weightKernelTest)\n{\n  const int num_rollouts = 500;\n  std::array<float, num_rollouts> cost_vec = { 0 };\n  std::array<float, num_rollouts> result_cost_vec = { 0 };\n  CEMShapingFunctionParams params;\n  
CEMShapingFunction<num_rollouts, 1> shaping_function;\n  shaping_function.GPUSetup();\n\n  // Use a range based for loop to set the cost\n  for (auto& cost : cost_vec)\n  {\n    cost = distribution(generator);\n  }\n\n  float baseline = 99;\n\n  for (float gamma = 0.0; gamma <= 1.0; gamma += 0.1)\n  {\n    params.gamma = gamma;\n    shaping_function.setParams(params);\n    launchShapingFunction_KernelTest<CEMShapingFunction<num_rollouts, 1>, num_rollouts, 1>(cost_vec, shaping_function,\n                                                                                           baseline, result_cost_vec);\n    for (int i = 0; i < cost_vec.size(); i++)\n    {\n      if (cost_vec[i] > baseline)\n      {\n        EXPECT_FLOAT_EQ(result_cost_vec[i], 1.0 / ((int)num_rollouts * params.gamma + 1));\n      }\n      else\n      {\n        EXPECT_FLOAT_EQ(result_cost_vec[i], 0.0);\n      }\n    }\n  }\n}\n\n/*\nTEST_F(ShapingFunctionTest, launchWeightKernelTest) {\nconst int num_rollouts = 500;\nShapingFunction<num_rollouts, 1>::cost_traj cost_traj;\nstd::array<float, num_rollouts> result_cost_vec = {0};\n\nShapingFunctionParams params;\nShapingFunction<num_rollouts, 1> shaping_function;\nshaping_function.GPUSetup();\n\n// Use a range based for loop to set the cost\ncost_traj = ShapingFunction<num_rollouts, 1>::cost_traj::Zero();\nfor (int i = 0; i < num_rollouts; i++) {\ncost_traj(i) = distribution(generator);\n}\n\nfloat min_cost_known = cost_traj.minCoeff();\n\nfor (float lambda_inv = 0.01; lambda_inv < 3.0; lambda_inv += 0.1) {\nparams.lambda_inv = lambda_inv;\nshaping_function.setParams(params);\nlaunchShapingFunction_KernelTest<ShapingFunction<num_rollouts, 1>, num_rollouts>(cost_traj, shaping_function,\nmin_cost_known, result_cost_vec); for (int i = 0; i < result_cost_vec.size(); i++) { EXPECT_FLOAT_EQ(result_cost_vec[i],\nexpf(-lambda_inv * (cost_traj(i) - min_cost_known)));\n}\n}\n}\n */\n\nTEST_F(CEMShapingFunctionTest, computeWeightsTest)\n{\n  const int 
num_rollouts = 500;\n  CEMShapingFunction<num_rollouts, 1>::cost_traj cost_traj;\n  CEMShapingFunction<num_rollouts, 1>::cost_traj cost_traj_copy;\n\n  CEMShapingFunctionParams params;\n  CEMShapingFunction<num_rollouts, 1> shaping_function;\n  shaping_function.GPUSetup();\n\n  // Use a range based for loop to set the cost\n  cost_traj = ShapingFunction<num_rollouts, 1>::cost_traj::Zero();\n  cost_traj_copy = ShapingFunction<num_rollouts, 1>::cost_traj::Zero();\n  const float gamma_total = 1.0;\n  const float gamma_inc = 0.1;\n  const float gamma_start = 0.0;\n\n  int index = 0;\n  for (float gamma = gamma_start; gamma <= gamma_total; gamma += gamma_inc)\n  {\n    // create the random vector\n    for (int i = 0; i < num_rollouts; i++)\n    {\n      cost_traj(i) = distribution(generator);\n    }\n\n    // find the correct costs for the baseline\n    std::sort(cost_traj.data(), cost_traj.data() + num_rollouts, std::greater<float>());\n    float pivot = cost_traj[gamma * num_rollouts];\n    std::random_shuffle(cost_traj.data(), cost_traj.data() + num_rollouts);\n    for (int i = 0; i < num_rollouts; i++)\n    {\n      cost_traj_copy(i) = cost_traj(i);\n    }\n\n    float* trajectory_costs_d;\n    HANDLE_ERROR(cudaMalloc((void**)&trajectory_costs_d, sizeof(float) * num_rollouts));\n    HANDLE_ERROR(\n        cudaMemcpy(trajectory_costs_d, cost_traj.data(), sizeof(float) * cost_traj.size(), cudaMemcpyHostToDevice))\n\n    params.gamma = gamma;\n    shaping_function.setParams(params);\n\n    shaping_function.computeWeights(cost_traj, trajectory_costs_d);\n\n    EXPECT_FLOAT_EQ(shaping_function.getBaseline(), pivot);\n    if (gamma == 0)\n    {\n      EXPECT_FLOAT_EQ(shaping_function.getBaseline(), cost_traj_copy.maxCoeff());\n    }\n    if (gamma == 1.0)\n    {\n      EXPECT_FLOAT_EQ(shaping_function.getBaseline(), cost_traj_copy.minCoeff());\n    }\n    EXPECT_EQ(shaping_function.getNormalizer(), 1.0);\n    EXPECT_NEAR(cost_traj.sum(), 1.0, 1e-5) << cost_traj.sum();\n   
 for (int i = 0; i < cost_traj.size(); i++)\n    {\n      if (cost_traj_copy(i) >= pivot)\n      {\n        EXPECT_FLOAT_EQ(cost_traj(i), 1.0 / ((int)num_rollouts * params.gamma + 1));\n      }\n      else\n      {\n        EXPECT_FLOAT_EQ(cost_traj(i), 0.0);\n      }\n    }\n    index++;\n  }\n}\n"
  },
  {
    "path": "tests/shaping_functions/shaping_function_test.cu",
    "content": "#include <gtest/gtest.h>\n#include <mppi/dynamics/cartpole/cartpole_dynamics.cuh>\n#include <mppi/cost_functions/cartpole/cartpole_quadratic_cost.cuh>\n\n#include <mppi/utils/test_helper.h>\n#include <random>\n\n#include <mppi/shaping_functions/shaping_function.cuh>\n\n#include <kernel_tests/shaping_functions/shaping_function_kernels_tests.cuh>\n\nclass ShapingFunctionTest : public testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    generator = std::default_random_engine(7.0);\n    distribution = std::normal_distribution<float>(100.0, 2.0);\n  }\n\n  void TearDown() override\n  {\n  }\n\n  std::default_random_engine generator;\n  std::normal_distribution<float> distribution;\n};\n\nTEST_F(ShapingFunctionTest, computeWeightTest)\n{\n  const int num_rollouts = 500;\n  std::array<float, num_rollouts> cost_vec = { 0 };\n  ShapingFunctionParams params;\n  ShapingFunction<num_rollouts, 1> shaping_function;\n\n  // Use a range based for loop to set the cost\n  for (auto& cost : cost_vec)\n  {\n    cost = distribution(generator);\n  }\n\n  float min_cost_known = *std::min_element(cost_vec.begin(), cost_vec.end());\n\n  for (float lambda_inv = 0.0; lambda_inv < 3.0; lambda_inv += 0.1)\n  {\n    params.lambda_inv = lambda_inv;\n    shaping_function.setParams(params);\n    for (int i = 0; i < cost_vec.size(); i++)\n    {\n      float weight = shaping_function.computeWeight(cost_vec.data(), min_cost_known, i);\n      EXPECT_FLOAT_EQ(weight, expf(-lambda_inv * (cost_vec[i] - min_cost_known)));\n    }\n  }\n}\n\nTEST_F(ShapingFunctionTest, weightKernelTest)\n{\n  const int num_rollouts = 500;\n  std::array<float, num_rollouts> cost_vec = { 0 };\n  std::array<float, num_rollouts> result_cost_vec = { 0 };\n  ShapingFunctionParams params;\n  ShapingFunction<num_rollouts, 1> shaping_function;\n  shaping_function.GPUSetup();\n\n  // Use a range based for loop to set the cost\n  for (auto& cost : cost_vec)\n  {\n    cost = distribution(generator);\n  }\n\n  
float min_cost_known = *std::min_element(cost_vec.begin(), cost_vec.end());\n\n  for (float lambda_inv = 0.01; lambda_inv < 3.0; lambda_inv += 0.1)\n  {\n    params.lambda_inv = lambda_inv;\n    shaping_function.setParams(params);\n    launchShapingFunction_KernelTest<ShapingFunction<num_rollouts, 1>, num_rollouts, 1>(\n        cost_vec, shaping_function, min_cost_known, result_cost_vec);\n    for (int i = 0; i < cost_vec.size(); i++)\n    {\n      EXPECT_FLOAT_EQ(result_cost_vec[i], expf(-lambda_inv * (cost_vec[i] - min_cost_known)));\n    }\n  }\n}\n\nTEST_F(ShapingFunctionTest, launchWeightKernelTest)\n{\n  const int num_rollouts = 500;\n  ShapingFunction<num_rollouts, 1>::cost_traj cost_traj;\n  std::array<float, num_rollouts> result_cost_vec = { 0 };\n\n  ShapingFunctionParams params;\n  ShapingFunction<num_rollouts, 1> shaping_function;\n  shaping_function.GPUSetup();\n\n  // Use a range based for loop to set the cost\n  cost_traj = ShapingFunction<num_rollouts, 1>::cost_traj::Zero();\n  for (int i = 0; i < num_rollouts; i++)\n  {\n    cost_traj(i) = distribution(generator);\n  }\n\n  float min_cost_known = cost_traj.minCoeff();\n\n  for (float lambda_inv = 0.01; lambda_inv < 3.0; lambda_inv += 0.1)\n  {\n    params.lambda_inv = lambda_inv;\n    shaping_function.setParams(params);\n    launchShapingFunction_KernelTest<ShapingFunction<num_rollouts, 1>, num_rollouts>(cost_traj, shaping_function,\n                                                                                     min_cost_known, result_cost_vec);\n    for (int i = 0; i < result_cost_vec.size(); i++)\n    {\n      EXPECT_FLOAT_EQ(result_cost_vec[i], expf(-lambda_inv * (cost_traj(i) - min_cost_known)));\n    }\n  }\n}\n\nTEST_F(ShapingFunctionTest, computeWeightsTest)\n{\n  const int num_rollouts = 500;\n  ShapingFunction<num_rollouts, 1>::cost_traj cost_traj;\n  ShapingFunction<num_rollouts, 1>::cost_traj result_cost_traj;\n\n  ShapingFunctionParams params;\n  ShapingFunction<num_rollouts, 1> 
shaping_function;\n  shaping_function.GPUSetup();\n\n  // Use a range based for loop to set the cost\n  cost_traj = ShapingFunction<num_rollouts, 1>::cost_traj::Zero();\n  for (int i = 0; i < num_rollouts; i++)\n  {\n    cost_traj(i) = distribution(generator);\n    result_cost_traj(i) = cost_traj(i);\n  }\n\n  float* trajectory_costs_d;\n  HANDLE_ERROR(cudaMalloc((void**)&trajectory_costs_d, sizeof(float) * num_rollouts));\n  HANDLE_ERROR(\n      cudaMemcpy(trajectory_costs_d, cost_traj.data(), sizeof(float) * cost_traj.size(), cudaMemcpyHostToDevice))\n\n  for (float lambda_inv = 0.01; lambda_inv < 3.0; lambda_inv += 0.1)\n  {\n    float min_cost_known = cost_traj.minCoeff();\n\n    params.lambda_inv = lambda_inv;\n    shaping_function.setParams(params);\n    shaping_function.computeWeights(cost_traj, trajectory_costs_d);\n    float normalizer = cost_traj.sum();\n\n    EXPECT_EQ(shaping_function.getBaseline(), min_cost_known);\n    EXPECT_EQ(shaping_function.getNormalizer(), normalizer);\n    for (int i = 0; i < cost_traj.size(); i++)\n    {\n      EXPECT_FLOAT_EQ(cost_traj(i), expf(-lambda_inv * (result_cost_traj(i) - min_cost_known)));\n      result_cost_traj(i) = cost_traj(i);\n    }\n  }\n}\n"
  },
  {
    "path": "tests/templated_headers/autorally_test_map.h.in",
    "content": "#ifndef TESTS_COST_FUNCTIONS_AUTORALLY_TEST_MAP_H_\n#define TESTS_COST_FUNCTIONS_AUTORALLY_TEST_MAP_H_\n\n#include <string>\n\nnamespace mppi {\nnamespace tests {\n  const std::string test_map_file = \"@TEST_MAP_FOLDER@/track_map.npz\";\n  const std::string standard_test_map_file = \"@TEST_MAP_FOLDER@/track_map_standard.npz\";\n  const std::string robust_test_map_file = \"@TEST_MAP_FOLDER@/track_map_robust.npz\";\n  const std::string ccrf_map = \"@PROJECT_SOURCE_DIR@/resources/ccrf_track.npz\";\n}\n}\n#endif // TESTS_COST_FUNCTIONS_AUTORALLY_TEST_MAP_H_\n"
  },
  {
    "path": "tests/templated_headers/autorally_test_network.h.in",
    "content": "#ifndef TESTS_COST_FUNCTIONS_AUTORALLY_TEST_NETWORK_H_\n#define TESTS_COST_FUNCTIONS_AUTORALLY_TEST_NETWORK_H_\n\n#include <string>\n\nnamespace mppi {\nnamespace tests {\n  const std::string test_load_nn_file = \"@TEST_NETWORK_FOLDER@/neuralNetLoadTest.npz\";\n  const std::string test_compute_nn_file = \"@TEST_NETWORK_FOLDER@/neuralNetComputeTest.npz\";\n  const std::string old_autorally_network_file = \"@PROJECT_SOURCE_DIR@/resources/autorally_nnet_09_12_2018.npz\";\n  // const std::string autorally_lstm_network_file = \"@PROJECT_SOURCE_DIR@/resources/integrator_FNN_LSTM_25_step_10_pre_no_out_20_1.npz\";\n  // const std::string autorally_lstm_network_file = \"@PROJECT_SOURCE_DIR@/resources/bogdan.npz\";\n  const std::string autorally_lstm_network_file = \"@PROJECT_SOURCE_DIR@/resources/lstm.npz\";\n  const std::string autorally_hidden_network_file = \"@PROJECT_SOURCE_DIR@/resources/hidden_init.npz\";\n  const std::string autorally_cell_network_file = \"@PROJECT_SOURCE_DIR@/resources/cell_init.npz\";\n  const std::string autorally_output_network_file = \"@PROJECT_SOURCE_DIR@/resources/output.npz\";\n}\n}\n#endif // TESTS_COST_FUNCTIONS_AUTORALLY_TEST_NETWORK_H_\n"
  },
  {
    "path": "tests/templated_headers/racer_test_networks.h.in",
    "content": "#ifndef TESTS_COST_FUNCTIONS_AUTORALLY_TEST_NETWORK_H_\n#define TESTS_COST_FUNCTIONS_AUTORALLY_TEST_NETWORK_H_\n\n#include <string>\n\nnamespace mppi {\nnamespace tests {\n  //const std::string steering_lstm = \"@PROJECT_SOURCE_DIR@/resources/lstm_lstm_steering.npz\";\n  const std::string steering_lstm = \"@PROJECT_SOURCE_DIR@/resources/lstm_lstm_steering_accel.npz\";\n  const std::string brake_delay_lstm = \"@PROJECT_SOURCE_DIR@/resources/lstm_lstm_brake_delay.npz\";\n  const std::string ackerman_lstm = \"@PROJECT_SOURCE_DIR@/resources/lstm_lstm_ackerman.npz\";\n  const std::string ackerman_test = \"@PROJECT_SOURCE_DIR@/resources/ackerman_test.npz\";\n  const std::string bicycle_slip_hybrid_test = \"@PROJECT_SOURCE_DIR@/resources/bicycle_slip_hybrid_test.npz\";\n  const std::string bicycle_slip_hybrid_true = \"@PROJECT_SOURCE_DIR@/resources/bicycle_slip_hybrid.npz\";\n  const std::string bicycle_slip_kinematic_test = \"@PROJECT_SOURCE_DIR@/resources/bicycle_slip_kinematic_test.npz\";\n  const std::string bicycle_slip_kinematic_true = \"@PROJECT_SOURCE_DIR@/resources/lstm_lstm_bicycle_slip_kinematic.npz\";\n  const std::string racer_dubins_elevation_uncertainty_test = \"@PROJECT_SOURCE_DIR@/resources/network_rde_test.npz\";\n}\n}\n#endif // TESTS_COST_FUNCTIONS_AUTORALLY_TEST_NETWORK_H_\n"
  },
  {
    "path": "tests/templated_headers/test_networks.h.in",
    "content": "#ifndef TESTS_COST_FUNCTIONS_AUTORALLY_TEST_NETWORK_H_\n#define TESTS_COST_FUNCTIONS_AUTORALLY_TEST_NETWORK_H_\n\n#include <string>\n\nnamespace mppi {\nnamespace tests {\n  const std::string test_load_nn_file = \"@TEST_NETWORK_FOLDER@/neuralNetLoadTest.npz\";\n  const std::string test_lstm_lstm = \"@PROJECT_SOURCE_DIR@/resources/lstm_lstm_test.npz\";\n}\n}\n#endif // TESTS_COST_FUNCTIONS_AUTORALLY_TEST_NETWORK_H_\n"
  },
  {
    "path": "tests/test_main.cpp",
    "content": "//\n// Created by mgandhi3 on 11/18/19.\n//\n\n#include <gtest/gtest.h>\n\nint main(int argc, char** argv)\n{\n  ::testing::InitGoogleTest(&argc, argv);\n  return RUN_ALL_TESTS();\n}"
  },
  {
    "path": "tests/texture_helpers/CMakeLists.txt",
    "content": "set(GTEST_LIBRARIES gtest gmock gtest_main)\nfile(GLOB TARGET_SRCS RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.cu)\n\nforeach(T_FILE IN LISTS TARGET_SRCS)\n  get_filename_component(T_NAME ${T_FILE} NAME_WE)\n  add_executable(${T_NAME} ${PROJECT_SOURCE_DIR}/tests/test_main.cpp ${T_FILE})\n  target_link_libraries(${T_NAME}\n          ${GTEST_LIBRARIES}\n          ${MPPI_HEADER_LIBRARY_NAME})\n  gtest_discover_tests(${T_NAME})\n  set_target_properties(${T_NAME} PROPERTIES FOLDER test)\nendforeach()\n"
  },
  {
    "path": "tests/texture_helpers/texture_helper_test.cu",
    "content": "#include <gtest/gtest.h>\n\n#include <mppi/utils/test_helper.h>\n#include <random>\n#include <algorithm>\n\n#include <mppi/utils/texture_helpers/texture_helper.cuh>\n\nclass TextureHelperTest : public testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    generator = std::default_random_engine(7.0);\n    distribution = std::normal_distribution<float>(100.0, 2.0);\n  }\n\n  void TearDown() override\n  {\n  }\n\n  std::default_random_engine generator;\n  std::normal_distribution<float> distribution;\n};\n\nclass TextureHelperImpl : public TextureHelper<TextureHelperImpl, float4>\n{\npublic:\n  TextureHelperImpl(int number, cudaStream_t stream = 0) : TextureHelper<TextureHelperImpl, float4>(number, stream)\n  {\n  }\n\n  ~TextureHelperImpl()\n  {\n    for (TextureParams<float4>& params : textures_)\n    {\n      params.allocated = false;\n    }\n  }\n\n  std::vector<TextureParams<float4>> getTextures()\n  {\n    return textures_;\n  }\n\n  std::vector<TextureParams<float4>> getTexturesBuffer()\n  {\n    return textures_buffer_;\n  }\n\n  TextureParams<float4>* getParamsD()\n  {\n    return this->params_d_;\n  }\n\n  void setTestExtent(int index, cudaExtent extent)\n  {\n    this->textures_buffer_[index].extent = extent;\n    this->textures_[index].extent = extent;\n  }\n\n  void setGpuMemStatus(bool flag)\n  {\n    this->GPUMemStatus_ = flag;\n  }\n\n  void setTestFlagsInParam(int index, bool update_mem, bool update_data, bool allocated, bool update_params)\n  {\n    this->textures_buffer_[index].update_mem = update_mem;\n    this->textures_buffer_[index].update_data = update_data;\n    this->textures_[index].allocated = allocated;\n    this->textures_buffer_[index].update_params = update_params;\n  }\n\n  void copyDataToGPU(int index, bool sync = false) override\n  {\n    copyDataToGPUCalled++;\n    this->textures_[index].update_data = false;\n  }\n\n  void allocateCudaTexture(int index) override\n  {\n    if 
(this->textures_[index].allocated)\n    {\n      freeCudaMem(this->textures_[index]);\n    }\n    allocateCudaTextureCalled++;\n  }\n\n  void createCudaTexture(int index, bool sync = false) override\n  {\n    createCudaTextureCalled++;\n    this->textures_[index].allocated = true;\n    this->textures_[index].update_mem = false;\n  }\n\n  void copyParamsToGPU(int index, bool sync = false)\n  {\n    copyParamsToGPUCalled++;\n    this->textures_[index].update_params = false;\n  }\n\n  void clearCounters()\n  {\n    copyDataToGPUCalled = 0;\n    allocateCudaTextureCalled = 0;\n    createCudaTextureCalled = 0;\n    copyParamsToGPUCalled = 0;\n    freeCudaMemCalled = 0;\n  }\n\n  void freeCudaMem(TextureParams<float4>& texture)\n  {\n    texture.allocated = false;\n    texture.use = false;\n    freeCudaMemCalled++;\n  }\n\n  TextureParams<float4>* getTextureD()\n  {\n    return this->textures_d_;\n  }\n\n  TextureParams<float4>* getTextureDataPtr()\n  {\n    return this->textures_.data();\n  }\n\n  int copyDataToGPUCalled = 0;\n  int allocateCudaTextureCalled = 0;\n  int createCudaTextureCalled = 0;\n  int copyParamsToGPUCalled = 0;\n  int freeCudaMemCalled = 0;\n\n  float4 queryTexture(const int index, const float3& input)\n  {\n    float4 result;\n    result.x = input.x;\n    result.y = input.y;\n    result.z = input.z;\n    result.w = index;\n    return result;\n  }\n};\n\nTEST_F(TextureHelperTest, Constructor)\n{\n  int number = 5;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n\n  EXPECT_EQ(textures.size(), number);\n  EXPECT_EQ(helper.getCpuValues().size(), number);\n  EXPECT_EQ(helper.getCpuBufferValues().size(), number);\n  EXPECT_NE(helper.getTextureD(), nullptr);\n  EXPECT_NE(helper.getTextureDataPtr(), nullptr);\n  EXPECT_EQ(helper.ptr_d_, nullptr);\n  EXPECT_EQ(helper.getParamsD(), nullptr);\n  EXPECT_EQ(helper.getTextureD(), helper.getTextureDataPtr());\n\n  for (const 
TextureParams<float4> texture : textures)\n  {\n    EXPECT_EQ(texture.use, false);\n    EXPECT_EQ(texture.allocated, false);\n    EXPECT_EQ(texture.update_data, false);\n    EXPECT_EQ(texture.update_mem, false);\n\n    EXPECT_EQ(texture.array_d, nullptr);\n    EXPECT_EQ(texture.tex_d, 0);\n\n    EXPECT_FLOAT_EQ(texture.origin.x, 0);\n    EXPECT_FLOAT_EQ(texture.origin.y, 0);\n    EXPECT_FLOAT_EQ(texture.origin.z, 0);\n\n    EXPECT_FLOAT_EQ(texture.rotations[0].x, 1);\n    EXPECT_FLOAT_EQ(texture.rotations[0].y, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[0].z, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[1].x, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[1].y, 1);\n    EXPECT_FLOAT_EQ(texture.rotations[1].z, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[2].x, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[2].y, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[2].z, 1);\n\n    EXPECT_EQ(texture.resDesc.resType, cudaResourceTypeArray);\n    EXPECT_EQ(texture.channelDesc.x, 32);\n    EXPECT_EQ(texture.channelDesc.y, 32);\n    EXPECT_EQ(texture.channelDesc.z, 32);\n    EXPECT_EQ(texture.channelDesc.w, 32);\n\n    EXPECT_EQ(texture.texDesc.addressMode[0], cudaAddressModeClamp);\n    EXPECT_EQ(texture.texDesc.addressMode[1], cudaAddressModeClamp);\n    EXPECT_EQ(texture.texDesc.filterMode, cudaFilterModeLinear);\n    EXPECT_EQ(texture.texDesc.readMode, cudaReadModeElementType);\n    EXPECT_EQ(texture.texDesc.normalizedCoords, 1);\n  }\n\n  textures = helper.getTexturesBuffer();\n\n  EXPECT_EQ(textures.size(), number);\n  EXPECT_NE(helper.getTextureD(), nullptr);\n  EXPECT_NE(helper.getTextureDataPtr(), nullptr);\n  EXPECT_EQ(helper.ptr_d_, nullptr);\n  EXPECT_EQ(helper.getParamsD(), nullptr);\n  EXPECT_EQ(helper.getTextureD(), helper.getTextureDataPtr());\n\n  for (const TextureParams<float4> texture : textures)\n  {\n    EXPECT_EQ(texture.use, false);\n    EXPECT_EQ(texture.allocated, false);\n    EXPECT_EQ(texture.update_data, false);\n    EXPECT_EQ(texture.update_mem, false);\n\n    
EXPECT_EQ(texture.array_d, nullptr);\n    EXPECT_EQ(texture.tex_d, 0);\n\n    EXPECT_FLOAT_EQ(texture.origin.x, 0);\n    EXPECT_FLOAT_EQ(texture.origin.y, 0);\n    EXPECT_FLOAT_EQ(texture.origin.z, 0);\n\n    EXPECT_FLOAT_EQ(texture.rotations[0].x, 1);\n    EXPECT_FLOAT_EQ(texture.rotations[0].y, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[0].z, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[1].x, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[1].y, 1);\n    EXPECT_FLOAT_EQ(texture.rotations[1].z, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[2].x, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[2].y, 0);\n    EXPECT_FLOAT_EQ(texture.rotations[2].z, 1);\n\n    EXPECT_EQ(texture.resDesc.resType, cudaResourceTypeArray);\n    EXPECT_EQ(texture.channelDesc.x, 32);\n    EXPECT_EQ(texture.channelDesc.y, 32);\n    EXPECT_EQ(texture.channelDesc.z, 32);\n    EXPECT_EQ(texture.channelDesc.w, 32);\n\n    EXPECT_EQ(texture.texDesc.addressMode[0], cudaAddressModeClamp);\n    EXPECT_EQ(texture.texDesc.addressMode[1], cudaAddressModeClamp);\n    EXPECT_EQ(texture.texDesc.filterMode, cudaFilterModeLinear);\n    EXPECT_EQ(texture.texDesc.readMode, cudaReadModeElementType);\n    EXPECT_EQ(texture.texDesc.normalizedCoords, 1);\n  }\n}\n\nTEST_F(TextureHelperTest, UpdateOriginTest)\n{\n  int number = 5;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n  helper.updateOrigin(0, make_float3(1, 2, 3));\n  helper.updateOrigin(1, make_float3(4, 5, 6));\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  std::vector<TextureParams<float4>> textures_buffer = helper.getTexturesBuffer();\n  EXPECT_FLOAT_EQ(textures[0].origin.x, 0);\n  EXPECT_FLOAT_EQ(textures[0].origin.y, 0);\n  EXPECT_FLOAT_EQ(textures[0].origin.z, 0);\n  EXPECT_FLOAT_EQ(textures_buffer[0].origin.x, 1);\n  EXPECT_FLOAT_EQ(textures_buffer[0].origin.y, 2);\n  EXPECT_FLOAT_EQ(textures_buffer[0].origin.z, 3);\n\n  EXPECT_FLOAT_EQ(textures[1].origin.x, 0);\n  EXPECT_FLOAT_EQ(textures[1].origin.y, 0);\n  
EXPECT_FLOAT_EQ(textures[1].origin.z, 0);\n  EXPECT_FLOAT_EQ(textures_buffer[1].origin.x, 4);\n  EXPECT_FLOAT_EQ(textures_buffer[1].origin.y, 5);\n  EXPECT_FLOAT_EQ(textures_buffer[1].origin.z, 6);\n\n  EXPECT_TRUE(textures_buffer[0].update_params);\n  EXPECT_TRUE(textures_buffer[1].update_params);\n  for (int i = 2; i < number; i++)\n  {\n    EXPECT_FALSE(textures_buffer[i].update_params);\n  }\n}\n\nTEST_F(TextureHelperTest, UpdateRotationTest)\n{\n  int number = 5;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(1, 2, 3);\n  new_rot_mat[1] = make_float3(4, 5, 6);\n  new_rot_mat[2] = make_float3(7, 8, 9);\n  helper.updateRotation(0, new_rot_mat);\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  std::vector<TextureParams<float4>> textures_buffer = helper.getTexturesBuffer();\n\n  // buffer is updated\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[0].x, 1);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[0].y, 2);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[0].z, 3);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[1].x, 4);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[1].y, 5);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[1].z, 6);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[2].x, 7);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[2].y, 8);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[2].z, 9);\n  // non buffer not updated\n  EXPECT_FLOAT_EQ(textures[0].rotations[0].x, 1);\n  EXPECT_FLOAT_EQ(textures[0].rotations[0].y, 0);\n  EXPECT_FLOAT_EQ(textures[0].rotations[0].z, 0);\n  EXPECT_FLOAT_EQ(textures[0].rotations[1].x, 0);\n  EXPECT_FLOAT_EQ(textures[0].rotations[1].y, 1);\n  EXPECT_FLOAT_EQ(textures[0].rotations[1].z, 0);\n  EXPECT_FLOAT_EQ(textures[0].rotations[2].x, 0);\n  EXPECT_FLOAT_EQ(textures[0].rotations[2].y, 0);\n  EXPECT_FLOAT_EQ(textures[0].rotations[2].z, 1);\n\n  new_rot_mat[2].x = 22;\n  helper.updateRotation(1, 
new_rot_mat);\n  textures_buffer = helper.getTexturesBuffer();\n\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[0].x, 1);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[0].y, 2);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[0].z, 3);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[1].x, 4);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[1].y, 5);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[1].z, 6);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[2].x, 7);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[2].y, 8);\n  EXPECT_FLOAT_EQ(textures_buffer[0].rotations[2].z, 9);\n\n  EXPECT_FLOAT_EQ(textures_buffer[1].rotations[0].x, 1);\n  EXPECT_FLOAT_EQ(textures_buffer[1].rotations[0].y, 2);\n  EXPECT_FLOAT_EQ(textures_buffer[1].rotations[0].z, 3);\n  EXPECT_FLOAT_EQ(textures_buffer[1].rotations[1].x, 4);\n  EXPECT_FLOAT_EQ(textures_buffer[1].rotations[1].y, 5);\n  EXPECT_FLOAT_EQ(textures_buffer[1].rotations[1].z, 6);\n  EXPECT_FLOAT_EQ(textures_buffer[1].rotations[2].x, 22);\n  EXPECT_FLOAT_EQ(textures_buffer[1].rotations[2].y, 8);\n  EXPECT_FLOAT_EQ(textures_buffer[1].rotations[2].z, 9);\n}\n\nTEST_F(TextureHelperTest, SetExtentTest)\n{\n  int number = 5;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n  cudaExtent extent = make_cudaExtent(4, 5, 6);\n  helper.setTestExtent(1, extent);\n\n  helper.setExtent(0, extent);\n  helper.setExtent(1, extent);\n\n  extent = make_cudaExtent(4, 5, 0);\n  helper.setExtent(2, extent);\n  helper.setTestExtent(3, extent);\n  helper.setExtent(3, extent);\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  std::vector<TextureParams<float4>> textures_buffer = helper.getTexturesBuffer();\n\n  EXPECT_TRUE(textures_buffer[0].update_mem);\n  EXPECT_EQ(textures_buffer[0].extent.width, 4);\n  EXPECT_EQ(textures_buffer[0].extent.height, 5);\n  EXPECT_EQ(textures_buffer[0].extent.depth, 6);\n  EXPECT_FALSE(textures[0].update_mem);\n  EXPECT_NE(textures[0].extent.width, 4);\n  
EXPECT_NE(textures[0].extent.height, 5);\n  EXPECT_NE(textures[0].extent.depth, 6);\n\n  EXPECT_FALSE(textures_buffer[1].update_mem);\n  EXPECT_EQ(textures_buffer[1].extent.width, 4);\n  EXPECT_EQ(textures_buffer[1].extent.height, 5);\n  EXPECT_EQ(textures_buffer[1].extent.depth, 6);\n\n  EXPECT_TRUE(textures_buffer[2].update_mem);\n  EXPECT_EQ(textures_buffer[2].extent.width, 4);\n  EXPECT_EQ(textures_buffer[2].extent.height, 5);\n  EXPECT_EQ(textures_buffer[2].extent.depth, 0);\n\n  EXPECT_FALSE(textures_buffer[3].update_mem);\n  EXPECT_EQ(textures_buffer[3].extent.width, 4);\n  EXPECT_EQ(textures_buffer[3].extent.height, 5);\n  EXPECT_EQ(textures_buffer[3].extent.depth, 0);\n}\n\nTEST_F(TextureHelperTest, CopyToDeviceTest)\n{\n  int number = 16;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n\n  helper.setTestFlagsInParam(0, false, false, false, false);\n  helper.setTestFlagsInParam(1, false, false, false, true);\n  helper.setTestFlagsInParam(2, false, false, true, false);\n  helper.setTestFlagsInParam(3, false, false, true, true);\n  helper.setTestFlagsInParam(4, false, true, false, false);\n  helper.setTestFlagsInParam(5, false, true, false, true);\n  helper.setTestFlagsInParam(6, false, true, true, false);\n  helper.setTestFlagsInParam(7, false, true, true, true);\n  helper.setTestFlagsInParam(8, true, false, false, false);\n  helper.setTestFlagsInParam(9, true, false, false, true);\n  helper.setTestFlagsInParam(10, true, false, true, false);\n  helper.setTestFlagsInParam(11, true, false, true, true);\n  helper.setTestFlagsInParam(12, true, true, false, false);\n  helper.setTestFlagsInParam(13, true, true, false, true);\n  helper.setTestFlagsInParam(14, true, true, true, false);\n  helper.setTestFlagsInParam(15, true, true, true, true);\n\n  // nothing should have happened since GPUMemStatus=False\n  helper.copyToDevice();\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  std::vector<TextureParams<float4>> textures_buffer = 
helper.getTexturesBuffer();\n\n  for (int i = 0; i < textures_buffer.size(); i++)\n  {\n    EXPECT_FALSE(textures_buffer[i].update_mem);\n    EXPECT_FALSE(textures_buffer[i].update_data);\n    EXPECT_FALSE(textures_buffer[i].allocated);\n    EXPECT_FALSE(textures_buffer[i].update_params);\n  }\n\n  EXPECT_FALSE(textures[0].update_mem);\n  EXPECT_FALSE(textures[0].update_data);\n  EXPECT_FALSE(textures[0].allocated);\n  EXPECT_FALSE(textures[0].update_params);\n\n  EXPECT_FALSE(textures[1].update_mem);\n  EXPECT_FALSE(textures[1].update_data);\n  EXPECT_FALSE(textures[1].allocated);\n  EXPECT_TRUE(textures[1].update_params);\n\n  EXPECT_FALSE(textures[2].update_mem);\n  EXPECT_FALSE(textures[2].update_data);\n  EXPECT_TRUE(textures[2].allocated);\n  EXPECT_FALSE(textures[2].update_params);\n\n  EXPECT_FALSE(textures[3].update_mem);\n  EXPECT_FALSE(textures[3].update_data);\n  EXPECT_TRUE(textures[3].allocated);\n  EXPECT_TRUE(textures[3].update_params);\n\n  EXPECT_FALSE(textures[4].update_mem);\n  EXPECT_TRUE(textures[4].update_data);\n  EXPECT_FALSE(textures[4].allocated);\n  EXPECT_FALSE(textures[4].update_params);\n\n  EXPECT_FALSE(textures[5].update_mem);\n  EXPECT_TRUE(textures[5].update_data);\n  EXPECT_FALSE(textures[5].allocated);\n  EXPECT_TRUE(textures[5].update_params);\n\n  EXPECT_FALSE(textures[6].update_mem);\n  EXPECT_TRUE(textures[6].update_data);\n  EXPECT_TRUE(textures[6].allocated);\n  EXPECT_FALSE(textures[6].update_params);\n\n  EXPECT_FALSE(textures[7].update_mem);\n  EXPECT_TRUE(textures[7].update_data);\n  EXPECT_TRUE(textures[7].allocated);\n  EXPECT_TRUE(textures[7].update_params);\n\n  EXPECT_TRUE(textures[8].update_mem);\n  EXPECT_FALSE(textures[8].update_data);\n  EXPECT_FALSE(textures[8].allocated);\n  EXPECT_FALSE(textures[8].update_params);\n\n  EXPECT_TRUE(textures[9].update_mem);\n  EXPECT_FALSE(textures[9].update_data);\n  EXPECT_FALSE(textures[9].allocated);\n  EXPECT_TRUE(textures[9].update_params);\n\n  
EXPECT_TRUE(textures[10].update_mem);\n  EXPECT_FALSE(textures[10].update_data);\n  EXPECT_TRUE(textures[10].allocated);\n  EXPECT_FALSE(textures[10].update_params);\n\n  EXPECT_TRUE(textures[11].update_mem);\n  EXPECT_FALSE(textures[11].update_data);\n  EXPECT_TRUE(textures[11].allocated);\n  EXPECT_TRUE(textures[11].update_params);\n\n  EXPECT_TRUE(textures[12].update_mem);\n  EXPECT_TRUE(textures[12].update_data);\n  EXPECT_FALSE(textures[12].allocated);\n  EXPECT_FALSE(textures[12].update_params);\n\n  EXPECT_TRUE(textures[13].update_mem);\n  EXPECT_TRUE(textures[13].update_data);\n  EXPECT_FALSE(textures[13].allocated);\n  EXPECT_TRUE(textures[13].update_params);\n\n  EXPECT_TRUE(textures[14].update_mem);\n  EXPECT_TRUE(textures[14].update_data);\n  EXPECT_TRUE(textures[14].allocated);\n  EXPECT_FALSE(textures[14].update_params);\n\n  EXPECT_TRUE(textures[15].update_mem);\n  EXPECT_TRUE(textures[15].update_data);\n  EXPECT_TRUE(textures[15].allocated);\n  EXPECT_TRUE(textures[15].update_params);\n\n  EXPECT_EQ(helper.copyDataToGPUCalled, 0);\n  EXPECT_EQ(helper.allocateCudaTextureCalled, 0);\n  EXPECT_EQ(helper.createCudaTextureCalled, 0);\n  EXPECT_EQ(helper.copyParamsToGPUCalled, 0);\n  EXPECT_EQ(helper.freeCudaMemCalled, 0);\n\n  helper.clearCounters();\n  helper.setGpuMemStatus(true);\n  helper.copyToDevice();\n  textures = helper.getTextures();\n\n  for (int i = 0; i < textures_buffer.size(); i++)\n  {\n    EXPECT_FALSE(textures_buffer[i].update_mem);\n    EXPECT_FALSE(textures_buffer[i].update_data);\n    EXPECT_FALSE(textures_buffer[i].allocated);\n    EXPECT_FALSE(textures_buffer[i].update_params);\n  }\n\n  EXPECT_FALSE(textures[0].update_mem);\n  EXPECT_FALSE(textures[0].update_data);\n  EXPECT_FALSE(textures[0].allocated);\n  EXPECT_FALSE(textures[0].update_params);\n\n  EXPECT_FALSE(textures[1].update_mem);\n  EXPECT_FALSE(textures[1].update_data);\n  EXPECT_FALSE(textures[1].allocated);\n  EXPECT_TRUE(textures[1].update_params);\n\n  
EXPECT_FALSE(textures[2].update_mem);\n  EXPECT_FALSE(textures[2].update_data);\n  EXPECT_TRUE(textures[2].allocated);\n  EXPECT_FALSE(textures[2].update_params);\n\n  EXPECT_FALSE(textures[3].update_mem);\n  EXPECT_FALSE(textures[3].update_data);\n  EXPECT_TRUE(textures[3].allocated);\n  EXPECT_FALSE(textures[3].update_params);\n\n  EXPECT_FALSE(textures[4].update_mem);\n  EXPECT_TRUE(textures[4].update_data);\n  EXPECT_FALSE(textures[4].allocated);\n  EXPECT_FALSE(textures[4].update_params);\n\n  EXPECT_FALSE(textures[5].update_mem);\n  EXPECT_TRUE(textures[5].update_data);\n  EXPECT_FALSE(textures[5].allocated);\n  EXPECT_TRUE(textures[5].update_params);\n\n  // false, true, true, false\n  EXPECT_FALSE(textures[6].update_mem);\n  EXPECT_FALSE(textures[6].update_data);\n  EXPECT_TRUE(textures[6].allocated);\n  EXPECT_FALSE(textures[6].update_params);\n\n  EXPECT_FALSE(textures[7].update_mem);\n  EXPECT_FALSE(textures[7].update_data);\n  EXPECT_TRUE(textures[7].allocated);\n  EXPECT_FALSE(textures[7].update_params);\n\n  EXPECT_FALSE(textures[8].update_mem);\n  EXPECT_FALSE(textures[8].update_data);\n  EXPECT_TRUE(textures[8].allocated);\n  EXPECT_FALSE(textures[8].update_params);\n\n  // true, false, false, true\n  EXPECT_FALSE(textures[9].update_mem);\n  EXPECT_FALSE(textures[9].update_data);\n  EXPECT_TRUE(textures[9].allocated);\n  EXPECT_FALSE(textures[9].update_params);\n\n  EXPECT_FALSE(textures[10].update_mem);\n  EXPECT_FALSE(textures[10].update_data);\n  EXPECT_TRUE(textures[10].allocated);\n  EXPECT_FALSE(textures[10].update_params);\n\n  EXPECT_FALSE(textures[11].update_mem);\n  EXPECT_FALSE(textures[11].update_data);\n  EXPECT_TRUE(textures[11].allocated);\n  EXPECT_FALSE(textures[11].update_params);\n\n  EXPECT_FALSE(textures[12].update_mem);\n  EXPECT_FALSE(textures[12].update_data);\n  EXPECT_TRUE(textures[12].allocated);\n  EXPECT_FALSE(textures[12].update_params);\n\n  EXPECT_FALSE(textures[13].update_mem);\n  
EXPECT_FALSE(textures[13].update_data);\n  EXPECT_TRUE(textures[13].allocated);\n  EXPECT_FALSE(textures[13].update_params);\n\n  EXPECT_FALSE(textures[14].update_mem);\n  EXPECT_FALSE(textures[14].update_data);\n  EXPECT_TRUE(textures[14].allocated);\n  EXPECT_FALSE(textures[14].update_params);\n\n  EXPECT_FALSE(textures[15].update_mem);\n  EXPECT_FALSE(textures[15].update_data);\n  EXPECT_TRUE(textures[15].allocated);\n  EXPECT_FALSE(textures[15].update_params);\n\n  EXPECT_EQ(helper.copyDataToGPUCalled, 6);\n  EXPECT_EQ(helper.allocateCudaTextureCalled, 8);\n  EXPECT_EQ(helper.createCudaTextureCalled, 8);\n  EXPECT_EQ(helper.copyParamsToGPUCalled, 6);\n  EXPECT_EQ(helper.freeCudaMemCalled, 4);\n}\n\nTEST_F(TextureHelperTest, WorldPoseToMapPoseTest)\n{\n  int number = 5;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n\n  // set it to be zero\n  std::array<float3, 3> new_rot_mat{};\n  helper.updateRotation(0, new_rot_mat);\n  helper.updateOrigin(0, make_float3(0, 0, 0));\n\n  float3 input = make_float3(0.1, 0.2, 0.3);\n  float3 output;\n  helper.worldPoseToMapPose(0, input, output);\n\n  EXPECT_FLOAT_EQ(input.x, 0.1);\n  EXPECT_FLOAT_EQ(input.y, 0.2);\n  EXPECT_FLOAT_EQ(input.z, 0.3);\n  EXPECT_FLOAT_EQ(output.x, 0.1);\n  EXPECT_FLOAT_EQ(output.y, 0.2);\n  EXPECT_FLOAT_EQ(output.z, 0.3);\n\n  helper.copyToDevice();\n  helper.worldPoseToMapPose(0, input, output);\n\n  EXPECT_FLOAT_EQ(input.x, 0.1);\n  EXPECT_FLOAT_EQ(input.y, 0.2);\n  EXPECT_FLOAT_EQ(input.z, 0.3);\n  EXPECT_FLOAT_EQ(output.x, 0);\n  EXPECT_FLOAT_EQ(output.y, 0);\n  EXPECT_FLOAT_EQ(output.z, 0);\n\n  // set rot mat non zero\n  new_rot_mat[0] = make_float3(1, 2, 3);\n  new_rot_mat[1] = make_float3(4, 5, 6);\n  new_rot_mat[2] = make_float3(7, 8, 9);\n  helper.updateRotation(0, new_rot_mat);\n  helper.copyToDevice();\n\n  helper.worldPoseToMapPose(0, input, output);\n  EXPECT_FLOAT_EQ(input.x, 0.1);\n  EXPECT_FLOAT_EQ(input.y, 0.2);\n  EXPECT_FLOAT_EQ(input.z, 0.3);\n  
EXPECT_FLOAT_EQ(output.x, 0.1 + 0.2 * 2 + 0.3 * 3);\n  EXPECT_FLOAT_EQ(output.y, 0.1 * 4 + 0.2 * 5 + 0.3 * 6);\n  EXPECT_FLOAT_EQ(output.z, 0.1 * 7 + 0.2 * 8 + 0.3 * 9);\n\n  // set origin non zero\n  new_rot_mat[0] = make_float3(0, 0, 0);\n  new_rot_mat[1] = make_float3(0, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 0);\n  helper.updateRotation(0, new_rot_mat);\n  helper.updateOrigin(0, make_float3(0.1, 0.2, 0.3));\n  helper.copyToDevice();\n\n  helper.worldPoseToMapPose(0, input, output);\n  EXPECT_FLOAT_EQ(input.x, 0.1);\n  EXPECT_FLOAT_EQ(input.y, 0.2);\n  EXPECT_FLOAT_EQ(input.z, 0.3);\n  EXPECT_FLOAT_EQ(output.x, 0);\n  EXPECT_FLOAT_EQ(output.y, 0);\n  EXPECT_FLOAT_EQ(output.z, 0);\n\n  // set rot mat non zero\n  new_rot_mat[0] = make_float3(1, 2, 3);\n  new_rot_mat[1] = make_float3(4, 5, 6);\n  new_rot_mat[2] = make_float3(7, 8, 9);\n  helper.updateRotation(0, new_rot_mat);\n  helper.copyToDevice();\n  input = make_float3(0.2, 0.4, 0.6);\n\n  helper.worldPoseToMapPose(0, input, output);\n  EXPECT_FLOAT_EQ(input.x, 0.2);\n  EXPECT_FLOAT_EQ(input.y, 0.4);\n  EXPECT_FLOAT_EQ(input.z, 0.6);\n  EXPECT_FLOAT_EQ(output.x, 0.1 + 0.2 * 2 + 0.3 * 3);\n  EXPECT_FLOAT_EQ(output.y, 0.1 * 4 + 0.2 * 5 + 0.3 * 6);\n  EXPECT_FLOAT_EQ(output.z, 0.1 * 7 + 0.2 * 8 + 0.3 * 9);\n}\n\nTEST_F(TextureHelperTest, BodyOffsetToWorldPoseTest)\n{\n  int number = 5;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n\n  // set it to be zero\n  std::array<float3, 3> new_rot_mat{};\n  helper.updateRotation(0, new_rot_mat);\n  helper.updateOrigin(0, make_float3(0, 0, 0));\n\n  float3 input = make_float3(0.1, 0.2, 0.3);\n  float3 offset = make_float3(1.1, 1.3, 1.5);\n  float3 rotation = make_float3(0, 0, 0);\n  float3 output;\n  helper.bodyOffsetToWorldPose(offset, input, rotation, output);\n\n  EXPECT_FLOAT_EQ(input.x, 0.1);\n  EXPECT_FLOAT_EQ(input.y, 0.2);\n  EXPECT_FLOAT_EQ(input.z, 0.3);\n  EXPECT_FLOAT_EQ(output.x, 1.2);\n  EXPECT_FLOAT_EQ(output.y, 1.5);\n  
EXPECT_FLOAT_EQ(output.z, 1.8);\n\n  // rotate by positive 90 degrees yaw\n  rotation = make_float3(0, 0, M_PI_2);\n  helper.copyToDevice();\n  helper.bodyOffsetToWorldPose(offset, input, rotation, output);\n  EXPECT_FLOAT_EQ(output.x, -1.2);\n  EXPECT_FLOAT_EQ(output.y, 1.3);\n  EXPECT_FLOAT_EQ(output.z, 1.8);\n\n  // rotate by -90 degrees yaw\n  rotation = make_float3(0, 0, -M_PI_2);\n  helper.copyToDevice();\n  helper.bodyOffsetToWorldPose(offset, input, rotation, output);\n  EXPECT_FLOAT_EQ(output.x, 0.1 + 1.3);\n  EXPECT_FLOAT_EQ(output.y, 0.2 - 1.1);\n  EXPECT_FLOAT_EQ(output.z, 1.8);\n\n  // rotate by +45 degrees yaw\n  rotation = make_float3(0, 0, M_PI_4);\n  helper.copyToDevice();\n  helper.bodyOffsetToWorldPose(offset, input, rotation, output);\n  // EXPECT_FLOAT_EQ(output.x, -0.041421525);\n  EXPECT_NEAR(output.x, -0.041421525, 1.0e-6);\n  EXPECT_FLOAT_EQ(output.y, 1.8970563);\n  EXPECT_FLOAT_EQ(output.z, 1.8);\n\n  // rotate by +90 degrees roll\n  rotation = make_float3(M_PI_2, 0, 0);\n  helper.copyToDevice();\n  helper.bodyOffsetToWorldPose(offset, input, rotation, output);\n  EXPECT_FLOAT_EQ(output.x, 0.1 + 1.1);\n  EXPECT_FLOAT_EQ(output.y, 0.2 - 1.5);\n  EXPECT_FLOAT_EQ(output.z, 0.3 + 1.3);\n\n  // rotate by -90 degrees roll\n  rotation = make_float3(-M_PI_2, 0, 0);\n  helper.copyToDevice();\n  helper.bodyOffsetToWorldPose(offset, input, rotation, output);\n  EXPECT_FLOAT_EQ(output.x, 0.1 + 1.1);\n  EXPECT_FLOAT_EQ(output.y, 0.2 + 1.5);\n  EXPECT_FLOAT_EQ(output.z, 0.3 - 1.3);\n\n  // rotate by +90 degrees pitch\n  rotation = make_float3(0, M_PI_2, 0);\n  helper.copyToDevice();\n  helper.bodyOffsetToWorldPose(offset, input, rotation, output);\n  EXPECT_FLOAT_EQ(output.x, 0.1 + 1.5);\n  EXPECT_FLOAT_EQ(output.y, 0.2 + 1.3);\n  EXPECT_FLOAT_EQ(output.z, 0.3 - 1.1);\n\n  // rotate by -90 degrees pitch\n  rotation = make_float3(0, -M_PI_2, 0);\n  helper.copyToDevice();\n  helper.bodyOffsetToWorldPose(offset, input, rotation, output);\n  
EXPECT_FLOAT_EQ(output.x, 0.1 - 1.5);\n  EXPECT_FLOAT_EQ(output.y, 0.2 + 1.3);\n  EXPECT_FLOAT_EQ(output.z, 0.3 + 1.1);\n}\n\nTEST_F(TextureHelperTest, MapPoseToTexCoordTest)\n{\n  int number = 5;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n\n  float3 input = make_float3(1.0, 2.0, 3.0);\n  float3 output;\n\n  helper.updateResolution(0, 10);\n  cudaExtent extent = make_cudaExtent(4, 5, 6);\n  helper.setExtent(0, extent);\n  helper.copyToDevice();\n\n  helper.mapPoseToTexCoord(0, input, output);\n  EXPECT_FLOAT_EQ(input.x, 1.0);\n  EXPECT_FLOAT_EQ(input.y, 2.0);\n  EXPECT_FLOAT_EQ(input.z, 3.0);\n  EXPECT_FLOAT_EQ(output.x, (0.1) / 4);\n  EXPECT_FLOAT_EQ(output.y, (0.2) / 5);\n  EXPECT_FLOAT_EQ(output.z, (0.3) / 6);\n}\n\nTEST_F(TextureHelperTest, MapPoseToTexCoordIndResTest)\n{\n  int number = 5;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n\n  float3 input = make_float3(1.0, 2.0, 3.0);\n  float3 output;\n\n  float3 resolution = make_float3(10, 20, 30);\n  helper.updateResolution(0, resolution);\n  cudaExtent extent = make_cudaExtent(4, 5, 6);\n  helper.setExtent(0, extent);\n  helper.copyToDevice();\n\n  helper.mapPoseToTexCoord(0, input, output);\n  EXPECT_FLOAT_EQ(input.x, 1.0);\n  EXPECT_FLOAT_EQ(input.y, 2.0);\n  EXPECT_FLOAT_EQ(input.z, 3.0);\n  EXPECT_FLOAT_EQ(output.x, (0.1) / 4);\n  EXPECT_FLOAT_EQ(output.y, (0.1) / 5);\n  EXPECT_FLOAT_EQ(output.z, (0.1) / 6);\n}\n\nTEST_F(TextureHelperTest, WorldPoseToTexCoordTest)\n{\n  int number = 5;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n\n  float3 input = make_float3(0.2, 0.4, 0.6);\n  float3 output;\n\n  helper.updateResolution(0, 10);\n  cudaExtent extent = make_cudaExtent(4, 5, 6);\n  helper.setExtent(0, extent);\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(1, 2, 3);\n  new_rot_mat[1] = make_float3(4, 5, 6);\n  new_rot_mat[2] = make_float3(7, 8, 9);\n  helper.updateRotation(0, new_rot_mat);\n  helper.updateOrigin(0, make_float3(0.1, 0.2, 
0.3));\n  helper.copyToDevice();\n\n  helper.worldPoseToTexCoord(0, input, output);\n  EXPECT_FLOAT_EQ(input.x, 0.2);\n  EXPECT_FLOAT_EQ(input.y, 0.4);\n  EXPECT_FLOAT_EQ(input.z, 0.6);\n  EXPECT_FLOAT_EQ(output.x, ((0.1 * 1 + 0.2 * 2 + 0.3 * 3) / 10) / 4);\n  EXPECT_FLOAT_EQ(output.y, ((0.1 * 4 + 0.2 * 5 + 0.3 * 6) / 10) / 5);\n  EXPECT_FLOAT_EQ(output.z, ((0.1 * 7 + 0.2 * 8 + 0.3 * 9) / 10) / 6);\n}\n\nTEST_F(TextureHelperTest, BodyOffsetToTexCoordTest)\n{\n  int number = 5;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n\n  const float3 input = make_float3(-0.9, -0.9, -0.9);\n  float3 output;\n\n  helper.updateResolution(0, 10);\n  cudaExtent extent = make_cudaExtent(4, 5, 6);\n  helper.setExtent(0, extent);\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(1, 2, 3);\n  new_rot_mat[1] = make_float3(4, 5, 6);\n  new_rot_mat[2] = make_float3(7, 8, 9);\n  helper.updateRotation(0, new_rot_mat);\n  helper.updateOrigin(0, make_float3(0.1, 0.2, 0.3));\n  helper.copyToDevice();\n\n  float3 offset = make_float3(1.1, 1.3, 1.5);\n  float3 rotation = make_float3(0, 0, 0);\n\n  helper.bodyOffsetWorldToTexCoord(0, offset, input, rotation, output);\n  EXPECT_FLOAT_EQ(output.x, ((0.1 * 1 + 0.2 * 2 + 0.3 * 3) / 10) / 4);\n  EXPECT_FLOAT_EQ(output.y, ((0.1 * 4 + 0.2 * 5 + 0.3 * 6) / 10) / 5);\n  EXPECT_FLOAT_EQ(output.z, ((0.1 * 7 + 0.2 * 8 + 0.3 * 9) / 10) / 6);\n}\n\nTEST_F(TextureHelperTest, AddNewTextureTest)\n{\n  int number = 8;\n  TextureHelperImpl helper = TextureHelperImpl(number);\n  cudaExtent extent = make_cudaExtent(4, 5, 6);\n  helper.addNewTexture(extent);\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  EXPECT_EQ(textures.size(), number + 1);\n  for (int i = 0; i < number + 1; i++)\n  {\n    auto texture = textures.at(i);\n\n    EXPECT_EQ(texture.use, false);\n    // since GPUMemStatus_ = false, this should always be false\n    EXPECT_EQ(texture.allocated, false);\n    EXPECT_EQ(texture.update_data, 
false);\n    EXPECT_EQ(texture.update_mem, false);\n    EXPECT_EQ(texture.resDesc.resType, cudaResourceTypeArray);\n    EXPECT_EQ(texture.channelDesc.x, 32);\n    EXPECT_EQ(texture.channelDesc.y, 32);\n    EXPECT_EQ(texture.channelDesc.z, 32);\n    EXPECT_EQ(texture.channelDesc.w, 32);\n\n    EXPECT_EQ(texture.texDesc.addressMode[0], cudaAddressModeClamp);\n    EXPECT_EQ(texture.texDesc.addressMode[1], cudaAddressModeClamp);\n    EXPECT_EQ(texture.texDesc.filterMode, cudaFilterModeLinear);\n    EXPECT_EQ(texture.texDesc.readMode, cudaReadModeElementType);\n    EXPECT_EQ(texture.texDesc.normalizedCoords, 1);\n  }\n  EXPECT_EQ(textures[8].extent.width, 4);\n  EXPECT_EQ(textures[8].extent.height, 5);\n  EXPECT_EQ(textures[8].extent.depth, 6);\n\n  helper.setGpuMemStatus(true);\n  helper.addNewTexture(extent);\n  textures = helper.getTextures();\n  EXPECT_EQ(textures.size(), number + 2);\n  for (int i = 0; i < number + 2; i++)\n  {\n    auto texture = textures.at(i);\n\n    EXPECT_EQ(texture.use, false);\n    if (i == 9)\n    {\n      EXPECT_EQ(texture.allocated, true);\n    }\n    else\n    {\n      EXPECT_EQ(texture.allocated, false);\n    }\n    EXPECT_EQ(texture.update_data, false);\n    EXPECT_EQ(texture.update_mem, false);\n    EXPECT_EQ(texture.resDesc.resType, cudaResourceTypeArray);\n    EXPECT_EQ(texture.channelDesc.x, 32);\n    EXPECT_EQ(texture.channelDesc.y, 32);\n    EXPECT_EQ(texture.channelDesc.z, 32);\n    EXPECT_EQ(texture.channelDesc.w, 32);\n\n    EXPECT_EQ(texture.texDesc.addressMode[0], cudaAddressModeClamp);\n    EXPECT_EQ(texture.texDesc.addressMode[1], cudaAddressModeClamp);\n    EXPECT_EQ(texture.texDesc.filterMode, cudaFilterModeLinear);\n    EXPECT_EQ(texture.texDesc.readMode, cudaReadModeElementType);\n    EXPECT_EQ(texture.texDesc.normalizedCoords, 1);\n  }\n  EXPECT_EQ(textures[9].extent.width, 4);\n  EXPECT_EQ(textures[9].extent.height, 5);\n  EXPECT_EQ(textures[9].extent.depth, 6);\n}\n"
  },
  {
    "path": "tests/texture_helpers/three_d_texture_helper_test.cu",
    "content": "#include <gtest/gtest.h>\n\n#include <mppi/utils/test_helper.h>\n#include <random>\n#include <algorithm>\n\n#include <mppi/utils/texture_helpers/three_d_texture_helper.cuh>\n#include <kernel_tests/utils/texture_test_kernels.cuh>\n\nclass ThreeDTextureHelperTest : public testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    CudaCheckError();\n    generator = std::default_random_engine(7.0);\n    distribution = std::normal_distribution<float>(100.0, 2.0);\n  }\n\n  void TearDown() override\n  {\n    CudaCheckError();\n  }\n\n  std::default_random_engine generator;\n  std::normal_distribution<float> distribution;\n};\n\nTEST_F(ThreeDTextureHelperTest, Constructor)\n{\n  ThreeDTextureHelper<float4> helper = ThreeDTextureHelper<float4>(4);\n\n  EXPECT_EQ(helper.getCpuValues().size(), 4);\n  EXPECT_EQ(helper.getLayerCopy().size(), 4);\n}\n\nTEST_F(ThreeDTextureHelperTest, ThreeDAllocateCudaTextureTest)\n{\n  ThreeDTextureHelper<float4> helper = ThreeDTextureHelper<float4>(2);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 1);\n  helper.setExtent(0, extent);\n  extent = make_cudaExtent(30, 40, 1);\n  helper.setExtent(1, extent);\n\n  std::vector<TextureParams<float4>> textures = helper.getBufferTextures();\n  EXPECT_EQ(textures[0].array_d, nullptr);\n  EXPECT_EQ(textures[0].tex_d, 0);\n  EXPECT_TRUE(textures[0].update_mem);\n  EXPECT_FALSE(textures[0].allocated);\n\n  helper.GPUSetup();\n\n  textures = helper.getTextures();\n  EXPECT_NE(textures[0].array_d, nullptr);\n  EXPECT_NE(textures[0].tex_d, 0);\n  EXPECT_FALSE(textures[0].update_mem);\n  EXPECT_TRUE(textures[0].allocated);\n}\n\nTEST_F(ThreeDTextureHelperTest, UpdateTexture)\n{\n  ThreeDTextureHelper<float4> helper = ThreeDTextureHelper<float4>(1);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 2);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i 
+ 2, i + 3);\n  }\n\n  helper.updateTexture(0, 0, data_vec);\n\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i + 200, i + 200 + 1, i + 200 + 2, i + 200 + 3);\n  }\n  helper.updateTexture(0, 1, data_vec);\n\n  std::vector<TextureParams<float4>> textures = helper.getBufferTextures();\n  EXPECT_TRUE(textures[0].update_data);\n\n  auto cpu_values = helper.getCpuBufferValues()[0];\n  for (int i = 0; i < cpu_values.size(); i++)\n  {\n    EXPECT_FLOAT_EQ(cpu_values[i].x, i);\n    EXPECT_FLOAT_EQ(cpu_values[i].y, i + 1);\n    EXPECT_FLOAT_EQ(cpu_values[i].z, i + 2);\n    EXPECT_FLOAT_EQ(cpu_values[i].w, i + 3);\n  }\n}\n\nTEST_F(ThreeDTextureHelperTest, UpdateTextureColumnMajor)\n{\n  ThreeDTextureHelper<float4> helper = ThreeDTextureHelper<float4>(3);\n  cudaExtent extent = make_cudaExtent(10, 20, 3);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  // just in case I am stupid\n  std::set<float> total_set;\n  for (int depth = 0; depth < 3; depth++)\n  {\n    for (int i = 0; i < data_vec.size(); i++)\n    {\n      float val = depth * 20 * 10 + i;\n      data_vec[i] = make_float4(val, val + 1, val + 2, val + 3);\n      total_set.insert(val);\n    }\n    EXPECT_EQ(total_set.size(), 200 * (depth + 1));\n    helper.updateTexture(0, depth, data_vec, true);\n  }\n\n  // returns a rowMajor vector\n  auto cpu_values = helper.getCpuBufferValues()[0];\n\n  for (int k = 0; k < 3; k++)\n  {\n    for (int i = 0; i < 20; i++)\n    {\n      for (int j = 0; j < 10; j++)\n      {\n        int columnMajorIndex = (k * 10 * 20) + j * 20 + i;\n        int rowVectorIndex = (k * 10 * 20) + i * 10 + j;\n        EXPECT_FLOAT_EQ(cpu_values[rowVectorIndex].x, columnMajorIndex) << \" at index: \" << i;\n        EXPECT_EQ(total_set.erase(rowVectorIndex), 1);\n      }\n    }\n  }\n  EXPECT_EQ(total_set.size(), 0);\n}\n\nTEST_F(ThreeDTextureHelperTest, UpdateTextureException)\n{\n  ThreeDTextureHelper<float4> helper = 
ThreeDTextureHelper<float4>(2);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 1);\n  helper.setExtent(0, extent);\n  extent = make_cudaExtent(30, 40, 1);\n  helper.setExtent(1, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(30 * 40);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  EXPECT_THROW(helper.updateTexture(0, 0, data_vec), std::runtime_error);\n}\n\nTEST_F(ThreeDTextureHelperTest, CopyDataToGPUOneGo)\n{\n  ThreeDTextureHelper<float4> helper = ThreeDTextureHelper<float4>(1);\n  helper.GPUSetup();\n\n  cudaExtent extent = make_cudaExtent(10, 20, 5);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  for (int depth = 0; depth < 5; depth++)\n  {\n    for (int i = 0; i < data_vec.size(); i++)\n    {\n      float val = depth * 20 * 10 + i;\n      data_vec[i] = make_float4(val, val + 1, val + 2, val + 3);\n    }\n    helper.updateTexture(0, depth, data_vec);\n  }\n\n  helper.copyToDevice(true);\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  EXPECT_TRUE(textures[0].allocated);\n  EXPECT_FALSE(textures[0].update_data);\n  EXPECT_FALSE(textures[0].update_mem);\n\n  std::vector<float4> query_points;\n  // X\n  query_points.push_back(make_float4(0.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.05, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.95, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(1.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.45, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.5, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.55, 0.0, 0.0, 0.0));\n\n  // Y\n  query_points.push_back(make_float4(0.0, 0.025, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.05, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.075, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.975, 0.0, 0.0));\n  
query_points.push_back(make_float4(0.0, 1.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.475, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.5, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.525, 0.0, 0.0));\n\n  // Z\n  query_points.push_back(make_float4(0.0, 0.0, 0.1, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.2, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.3, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 1.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.9, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.8, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.7, 0.0));\n\n  auto results = getTextureAtPointsKernel<ThreeDTextureHelper<float4>, float4>(helper, query_points);\n\n  EXPECT_FLOAT_EQ(results.size(), query_points.size());\n  EXPECT_FLOAT_EQ(results[0].x, 0.0);\n  EXPECT_FLOAT_EQ(results[0].y, 1.0);\n  EXPECT_FLOAT_EQ(results[0].z, 2.0);\n  EXPECT_FLOAT_EQ(results[0].w, 3.0);\n  EXPECT_FLOAT_EQ(results[1].x, 0.0);\n  EXPECT_FLOAT_EQ(results[1].y, 1.0);\n  EXPECT_FLOAT_EQ(results[1].z, 2.0);\n  EXPECT_FLOAT_EQ(results[1].w, 3.0);\n\n  EXPECT_FLOAT_EQ(results[2].x, 9.0);\n  EXPECT_FLOAT_EQ(results[2].y, 10.0);\n  EXPECT_FLOAT_EQ(results[2].z, 11.0);\n  EXPECT_FLOAT_EQ(results[2].w, 12.0);\n  EXPECT_FLOAT_EQ(results[3].x, 9.0);\n  EXPECT_FLOAT_EQ(results[3].y, 10.0);\n  EXPECT_FLOAT_EQ(results[3].z, 11.0);\n  EXPECT_FLOAT_EQ(results[3].w, 12.0);\n\n  EXPECT_FLOAT_EQ(results[4].x, 4.0);\n  EXPECT_FLOAT_EQ(results[4].y, 5.0);\n  EXPECT_FLOAT_EQ(results[4].z, 6.0);\n  EXPECT_FLOAT_EQ(results[4].w, 7.0);\n  EXPECT_FLOAT_EQ(results[5].x, 4.5);\n  EXPECT_FLOAT_EQ(results[5].y, 5.5);\n  EXPECT_FLOAT_EQ(results[5].z, 6.5);\n  EXPECT_FLOAT_EQ(results[5].w, 7.5);\n  EXPECT_FLOAT_EQ(results[6].x, 5.0);\n  EXPECT_FLOAT_EQ(results[6].y, 6.0);\n  EXPECT_FLOAT_EQ(results[6].z, 7.0);\n  EXPECT_FLOAT_EQ(results[6].w, 8.0);\n\n  EXPECT_FLOAT_EQ(results[7].x, 0.0);\n  
EXPECT_FLOAT_EQ(results[8].x, 5);\n  EXPECT_FLOAT_EQ(results[9].x, 10);\n\n  EXPECT_FLOAT_EQ(results[10].x, 190);\n  EXPECT_FLOAT_EQ(results[11].x, 190);\n\n  EXPECT_FLOAT_EQ(results[12].x, 90);\n  EXPECT_FLOAT_EQ(results[13].x, 95);\n  EXPECT_FLOAT_EQ(results[14].x, 100);\n\n  EXPECT_FLOAT_EQ(results[15].x, 0.0);\n  EXPECT_FLOAT_EQ(results[16].x, 100.0);\n  EXPECT_FLOAT_EQ(results[17].x, 200.0);\n  EXPECT_FLOAT_EQ(results[18].x, 800.0);\n  EXPECT_FLOAT_EQ(results[19].x, 800.0);\n  EXPECT_FLOAT_EQ(results[20].x, 700.0);\n  EXPECT_FLOAT_EQ(results[21].x, 600.0);\n}\n\nTEST_F(ThreeDTextureHelperTest, TestCPUQuery)\n{\n  ThreeDTextureHelper<float4> helper = ThreeDTextureHelper<float4>(1);\n  helper.GPUSetup();\n  const int WIDTH = 10;\n  const int HEIGHT = 20;\n  const int DEPTH = 5;\n\n  cudaExtent extent = make_cudaExtent(WIDTH, HEIGHT, DEPTH);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(WIDTH * HEIGHT);\n  for (int depth = 0; depth < DEPTH; depth++)\n  {\n    for (int i = 0; i < data_vec.size(); i++)\n    {\n      float val = depth * HEIGHT * WIDTH + i;\n      data_vec[i] = make_float4(val, val + 1, val + 2, val + 3);\n    }\n    helper.updateTexture(0, depth, data_vec);\n  }\n\n  helper.copyToDevice(true);\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  EXPECT_TRUE(textures[0].allocated);\n  EXPECT_FALSE(textures[0].update_data);\n  EXPECT_FALSE(textures[0].update_mem);\n\n  std::vector<float4> query_points;\n  // X\n  query_points.push_back(make_float4(0.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.05, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.95, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(1.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.45, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.5, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.55, 0.0, 0.0, 0.0));\n\n  // Y\n  query_points.push_back(make_float4(0.0, 0.025, 0.0, 
0.0));\n  query_points.push_back(make_float4(0.0, 0.05, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.075, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.975, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 1.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.475, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.5, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.525, 0.0, 0.0));\n\n  // Z\n  query_points.push_back(make_float4(0.0, 0.0, 0.1, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.2, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.3, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 1.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.9, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.8, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.7, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 1.2, 0.0));\n\n  std::vector<float4> results;\n  for (const auto& query : query_points)\n  {\n    float3 xyz_point = make_float3(query.x, query.y, query.z);\n    int index = round(query.w);\n    results.push_back(helper.queryTexture(index, xyz_point));\n  }\n\n  EXPECT_FLOAT_EQ(results.size(), query_points.size());\n  EXPECT_FLOAT_EQ(results[0].x, 0.0);\n  EXPECT_FLOAT_EQ(results[0].y, 1.0);\n  EXPECT_FLOAT_EQ(results[0].z, 2.0);\n  EXPECT_FLOAT_EQ(results[0].w, 3.0);\n  EXPECT_FLOAT_EQ(results[1].x, 0.0);\n  EXPECT_FLOAT_EQ(results[1].y, 1.0);\n  EXPECT_FLOAT_EQ(results[1].z, 2.0);\n  EXPECT_FLOAT_EQ(results[1].w, 3.0);\n\n  EXPECT_FLOAT_EQ(results[2].x, 9.0);\n  EXPECT_FLOAT_EQ(results[2].y, 10.0);\n  EXPECT_FLOAT_EQ(results[2].z, 11.0);\n  EXPECT_FLOAT_EQ(results[2].w, 12.0);\n  EXPECT_FLOAT_EQ(results[3].x, 9.0);\n  EXPECT_FLOAT_EQ(results[3].y, 10.0);\n  EXPECT_FLOAT_EQ(results[3].z, 11.0);\n  EXPECT_FLOAT_EQ(results[3].w, 12.0);\n\n  EXPECT_FLOAT_EQ(results[4].x, 4.0);\n  EXPECT_FLOAT_EQ(results[4].y, 5.0);\n  EXPECT_FLOAT_EQ(results[4].z, 6.0);\n  
EXPECT_FLOAT_EQ(results[4].w, 7.0);\n  EXPECT_FLOAT_EQ(results[5].x, 4.5);\n  EXPECT_FLOAT_EQ(results[5].y, 5.5);\n  EXPECT_FLOAT_EQ(results[5].z, 6.5);\n  EXPECT_FLOAT_EQ(results[5].w, 7.5);\n  EXPECT_FLOAT_EQ(results[6].x, 5.0);\n  EXPECT_FLOAT_EQ(results[6].y, 6.0);\n  EXPECT_FLOAT_EQ(results[6].z, 7.0);\n  EXPECT_FLOAT_EQ(results[6].w, 8.0);\n\n  EXPECT_FLOAT_EQ(results[7].x, 0.0);\n  EXPECT_FLOAT_EQ(results[8].x, 5);\n  EXPECT_FLOAT_EQ(results[9].x, 10);\n\n  EXPECT_FLOAT_EQ(results[10].x, 190);\n  EXPECT_FLOAT_EQ(results[11].x, 190);\n\n  EXPECT_FLOAT_EQ(results[12].x, 90);\n  EXPECT_FLOAT_EQ(results[13].x, 95);\n  EXPECT_FLOAT_EQ(results[14].x, 100);\n\n  EXPECT_FLOAT_EQ(results[15].x, 0.0);\n  EXPECT_FLOAT_EQ(results[16].x, 100.0);\n  EXPECT_FLOAT_EQ(results[17].x, 200.0);\n  EXPECT_FLOAT_EQ(results[18].x, 800.0);\n  EXPECT_FLOAT_EQ(results[19].x, 800.0);\n  EXPECT_FLOAT_EQ(results[20].x, 700.0);\n  EXPECT_FLOAT_EQ(results[21].x, 600.0);\n  EXPECT_FLOAT_EQ(results[22].x, 800.0);\n}\n\nTEST_F(ThreeDTextureHelperTest, CopyDataToGPUSplitMiddle)\n{\n  ThreeDTextureHelper<float4> helper = ThreeDTextureHelper<float4>(1);\n  helper.GPUSetup();\n\n  cudaExtent extent = make_cudaExtent(10, 20, 5);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  // 0, 200, 400, 600, 800\n  for (int depth = 0; depth < 5; depth++)\n  {\n    for (int i = 0; i < data_vec.size(); i++)\n    {\n      float val = depth * 20 * 10 + i;\n      data_vec[i] = make_float4(val, val + 1, val + 2, val + 3);\n    }\n    helper.updateTexture(0, depth, data_vec);\n  }\n\n  // fill in odd indexes\n  // 0, 1200, 400, 1600, 800\n  for (int depth = 1; depth < 5; depth += 2)\n  {\n    for (int i = 0; i < data_vec.size(); i++)\n    {\n      float val = (depth + 5) * 20 * 10 + i;\n      data_vec[i] = make_float4(val, val + 1, val + 2, val + 3);\n    }\n    helper.updateTexture(0, depth, data_vec);\n  }\n  helper.copyToDevice(true);\n\n  std::vector<float4> 
query_points;\n  query_points.push_back(make_float4(0.0, 0.0, 0.1, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.2, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.3, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.5, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.6, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.7, 0.0));\n\n  auto results = getTextureAtPointsKernel<ThreeDTextureHelper<float4>, float4>(helper, query_points);\n\n  EXPECT_FLOAT_EQ(results.size(), query_points.size());\n  EXPECT_FLOAT_EQ(results[0].x, 0.0);\n  EXPECT_FLOAT_EQ(results[1].x, 600);\n  EXPECT_FLOAT_EQ(results[2].x, 1200);\n  EXPECT_FLOAT_EQ(results[3].x, 400);\n  EXPECT_FLOAT_EQ(results[4].x, 1000);\n  EXPECT_FLOAT_EQ(results[5].x, 1600);\n\n  // fill in even indexes\n  // 2000, 1200, 2400, 1600, 2800\n  for (int depth = 0; depth < 5; depth += 2)\n  {\n    for (int i = 0; i < data_vec.size(); i++)\n    {\n      float val = (depth + 10) * 20 * 10 + i;\n      data_vec[i] = make_float4(val, val + 1, val + 2, val + 3);\n    }\n    helper.updateTexture(0, depth, data_vec);\n  }\n  helper.copyToDevice(true);\n\n  results = getTextureAtPointsKernel<ThreeDTextureHelper<float4>, float4>(helper, query_points);\n\n  EXPECT_FLOAT_EQ(results.size(), query_points.size());\n  EXPECT_FLOAT_EQ(results[0].x, 2000.0);\n  EXPECT_FLOAT_EQ(results[1].x, 1600);\n  EXPECT_FLOAT_EQ(results[2].x, 1200);\n  EXPECT_FLOAT_EQ(results[3].x, 2400);\n  EXPECT_FLOAT_EQ(results[4].x, 2000);\n  EXPECT_FLOAT_EQ(results[5].x, 1600);\n}\n\nTEST_F(ThreeDTextureHelperTest, CheckWrapping)\n{\n  ThreeDTextureHelper<float4> helper = ThreeDTextureHelper<float4>(1);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 2);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  helper.updateTexture(0, 0, data_vec);\n\n  for (int i = 0; i 
< data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i + 200, i + 200 + 1, i + 200 + 2, i + 200 + 3);\n  }\n  helper.updateTexture(0, 1, data_vec);\n  helper.updateAddressMode(0, 2, cudaAddressModeWrap);\n  helper.GPUSetup();\n  helper.copyToDevice(true);\n\n  std::vector<float4> query_points;\n  query_points.push_back(make_float4(0.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.25, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.5, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.75, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 1.0, 0.0));\n\n  auto results = getTextureAtPointsKernel<ThreeDTextureHelper<float4>, float4>(helper, query_points);\n\n  EXPECT_FLOAT_EQ(results.size(), query_points.size());\n  EXPECT_FLOAT_EQ(results[0].x, 100.0);\n  EXPECT_FLOAT_EQ(results[1].x, 0);\n  EXPECT_FLOAT_EQ(results[2].x, 100);\n  EXPECT_FLOAT_EQ(results[3].x, 200);\n  EXPECT_FLOAT_EQ(results[4].x, 100);\n}\n\nTEST_F(ThreeDTextureHelperTest, CheckCPUWrapping)\n{\n  ThreeDTextureHelper<float4> helper = ThreeDTextureHelper<float4>(1);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 2);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  helper.updateTexture(0, 0, data_vec);\n\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i + 200, i + 200 + 1, i + 200 + 2, i + 200 + 3);\n  }\n  helper.updateTexture(0, 1, data_vec);\n  helper.updateAddressMode(0, 2, cudaAddressModeWrap);\n  helper.GPUSetup();\n  helper.copyToDevice(true);\n\n  std::vector<float4> query_points;\n  query_points.push_back(make_float4(0.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.25, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.5, 0.0));\n  query_points.push_back(make_float4(0.0, 0.0, 0.75, 0.0));\n  
query_points.push_back(make_float4(0.0, 0.0, 1.0, 0.0));\n\n  std::vector<float4> results;\n  for (const auto& query : query_points)\n  {\n    float3 xyz_point = make_float3(query.x, query.y, query.z);\n    int index = round(query.w);\n    results.push_back(helper.queryTexture(index, xyz_point));\n  }\n\n  EXPECT_FLOAT_EQ(results.size(), query_points.size());\n  EXPECT_FLOAT_EQ(results[0].x, 100.0);\n  EXPECT_FLOAT_EQ(results[1].x, 0);\n  EXPECT_FLOAT_EQ(results[2].x, 100);\n  EXPECT_FLOAT_EQ(results[3].x, 200);\n  EXPECT_FLOAT_EQ(results[4].x, 100);\n}\n"
  },
  {
    "path": "tests/texture_helpers/two_d_texture_helper_test.cu",
    "content": "#include <gtest/gtest.h>\n\n#include <mppi/utils/test_helper.h>\n#include <random>\n#include <algorithm>\n\n#include <mppi/utils/texture_helpers/two_d_texture_helper.cuh>\n#include <kernel_tests/utils/texture_test_kernels.cuh>\n\nclass TwoDTextureHelperTest : public testing::Test\n{\nprotected:\n  void SetUp() override\n  {\n    CudaCheckError();\n    generator = std::default_random_engine(7.0);\n    distribution = std::normal_distribution<float>(1.0, 3.0);\n  }\n\n  void TearDown() override\n  {\n    CudaCheckError();\n  }\n\n  std::default_random_engine generator;\n  std::normal_distribution<float> distribution;\n};\n\nTEST_F(TwoDTextureHelperTest, Constructor)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(4);\n\n  EXPECT_EQ(helper.getCpuValues().size(), 4);\n}\n\nTEST_F(TwoDTextureHelperTest, TwoDAllocateCudaTextureTest)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(2);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n  extent = make_cudaExtent(30, 40, 0);\n  helper.setExtent(1, extent);\n\n  helper.GPUSetup();\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  EXPECT_NE(textures[0].array_d, nullptr);\n  EXPECT_NE(textures[0].tex_d, 0);\n  EXPECT_FALSE(textures[0].update_mem);\n  EXPECT_TRUE(textures[0].allocated);\n  EXPECT_TRUE(helper.GPUMemStatus_);\n  EXPECT_NE(helper.ptr_d_, nullptr);\n\n  helper.freeCudaMem();\n\n  textures = helper.getTextures();\n  EXPECT_EQ(textures[0].array_d, nullptr);\n  EXPECT_EQ(textures[0].tex_d, 0);\n  EXPECT_FALSE(textures[0].update_mem);\n  EXPECT_FALSE(textures[0].allocated);\n  EXPECT_FALSE(helper.GPUMemStatus_);\n  EXPECT_EQ(helper.ptr_d_, nullptr);\n}\n\nTEST_F(TwoDTextureHelperTest, UpdateTextureRowMajor)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(2);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n  extent = make_cudaExtent(30, 40, 0);\n  
helper.setExtent(1, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(30 * 40);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  helper.updateTexture(1, data_vec);\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  std::vector<TextureParams<float4>> textures_buffer = helper.getBufferTextures();\n  EXPECT_FALSE(textures[1].update_data);\n  EXPECT_TRUE(textures_buffer[1].update_data);\n\n  auto cpu_values = helper.getCpuBufferValues()[1];\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    EXPECT_FLOAT_EQ(cpu_values[i].x, data_vec[i].x);\n    EXPECT_FLOAT_EQ(cpu_values[i].y, data_vec[i].y);\n    EXPECT_FLOAT_EQ(cpu_values[i].z, data_vec[i].z);\n    EXPECT_FLOAT_EQ(cpu_values[i].w, data_vec[i].w);\n  }\n}\n\nTEST_F(TwoDTextureHelperTest, UpdateTextureColumnMajor)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(2);\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  // just in case I am stupid\n  std::set<float> total_set;\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n    total_set.insert(i);\n  }\n  EXPECT_EQ(total_set.size(), 200);\n\n  helper.updateTexture(0, data_vec, true);\n\n  // returns a rowMajor vector\n  auto cpu_values = helper.getCpuBufferValues()[0];\n\n  for (int i = 0; i < 20; i++)\n  {\n    for (int j = 0; j < 10; j++)\n    {\n      int rowVectorIndex = i * 10 + j;\n      int columnMajorIndex = j * 20 + i;\n      EXPECT_FLOAT_EQ(cpu_values[rowVectorIndex].x, columnMajorIndex) << \" at index: \" << i;\n      EXPECT_EQ(total_set.erase(rowVectorIndex), 1);\n    }\n  }\n  EXPECT_EQ(total_set.size(), 0);\n}\n\nTEST_F(TwoDTextureHelperTest, EigenUpdateTextureRowMajor)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(2);\n\n  cudaExtent extent = 
make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n  extent = make_cudaExtent(30, 40, 0);\n  helper.setExtent(1, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(30 * 40);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  int outer_stride = 0;\n  int inner_stride = 0;\n  Eigen::Map<const Eigen::Matrix<float4, Eigen::Dynamic, Eigen::Dynamic>, 0,\n             Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>>\n      eigen_mat(data_vec.data(), 40, 30, Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>(outer_stride, inner_stride));\n  helper.updateTexture(1, eigen_mat, false);\n\n  std::vector<TextureParams<float4>> textures = helper.getBufferTextures();\n  EXPECT_TRUE(textures[1].update_data);\n\n  auto cpu_values = helper.getCpuBufferValues()[1];\n  EXPECT_EQ(cpu_values.size(), 30 * 40);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    EXPECT_FLOAT_EQ(cpu_values[i].x, data_vec[i].x);\n    EXPECT_FLOAT_EQ(cpu_values[i].y, data_vec[i].y);\n    EXPECT_FLOAT_EQ(cpu_values[i].z, data_vec[i].z);\n    EXPECT_FLOAT_EQ(cpu_values[i].w, data_vec[i].w);\n  }\n}\n\nTEST_F(TwoDTextureHelperTest, EigenUpdateTextureColumnMajor)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(2);\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  // just in case I am stupid\n  std::set<float> total_set;\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n    total_set.insert(i);\n  }\n  EXPECT_EQ(total_set.size(), 200);\n\n  int outer_stride = 0;\n  int inner_stride = 0;\n  Eigen::Map<const Eigen::Matrix<float4, Eigen::Dynamic, Eigen::Dynamic>, 0,\n             Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>>\n      eigen_mat(data_vec.data(), 40, 30, Eigen::Stride<Eigen::Dynamic, Eigen::Dynamic>(outer_stride, inner_stride));\n\n  
helper.updateTexture(0, eigen_mat);\n\n  // returns a rowMajor vector\n  auto cpu_values = helper.getCpuBufferValues()[0];\n  EXPECT_EQ(cpu_values.size(), 200);\n\n  for (int i = 0; i < 20; i++)\n  {\n    for (int j = 0; j < 10; j++)\n    {\n      int columnMajorIndex = j * 20 + i;\n      int rowVectorIndex = i * 10 + j;\n      EXPECT_FLOAT_EQ(cpu_values[rowVectorIndex].x, columnMajorIndex) << \" at index: \" << i;\n      EXPECT_EQ(total_set.erase(rowVectorIndex), 1);\n    }\n  }\n  EXPECT_EQ(total_set.size(), 0);\n}\n\nTEST_F(TwoDTextureHelperTest, UpdateTextureException)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(2);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n  extent = make_cudaExtent(30, 40, 0);\n  helper.setExtent(1, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(30 * 40);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  EXPECT_THROW(helper.updateTexture(0, data_vec), std::runtime_error);\n}\n\nvoid checkEqual(float4 computed, float val)\n{\n  EXPECT_FLOAT_EQ(computed.x, val);\n  EXPECT_FLOAT_EQ(computed.y, val + 1);\n  EXPECT_FLOAT_EQ(computed.z, val + 2);\n  EXPECT_FLOAT_EQ(computed.w, val + 3);\n}\n\nTEST_F(TwoDTextureHelperTest, UpdateTextureNewSize)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(2);\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n  extent = make_cudaExtent(30, 40, 0);\n  helper.setExtent(1, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(30 * 40);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  helper.updateTexture(0, data_vec, extent);\n\n  std::vector<TextureParams<float4>> textures = helper.getBufferTextures();\n  EXPECT_TRUE(textures[0].update_data);\n  EXPECT_TRUE(textures[0].update_mem);\n\n  auto cpu_values = helper.getCpuBufferValues()[0];\n  
for (int i = 0; i < data_vec.size(); i++)\n  {\n    checkEqual(cpu_values[i], data_vec[i].x);\n  }\n}\n\nvoid checkEqualTexGPUCPU(float4 computed, float val, TwoDTextureHelper<float4>& helper, float4 query_point)\n{\n  checkEqual(computed, val);\n  checkEqual(helper.queryTexture(query_point.w, make_float3(query_point.x, query_point.y, query_point.z)), val);\n}\n\nTEST_F(TwoDTextureHelperTest, CopyDataToGPU)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(1);\n  helper.GPUSetup();\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  helper.updateTexture(0, data_vec);\n  helper.copyToDevice(true);\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  EXPECT_TRUE(textures[0].allocated);\n  EXPECT_FALSE(textures[0].update_data);\n  EXPECT_FALSE(textures[0].update_mem);\n\n  std::vector<float4> query_points;\n  query_points.push_back(make_float4(0.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.05, 0.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.95, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(1.0, 0.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.45, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.5, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.55, 0.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.025, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.05, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.075, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.0, 0.975, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 1.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.0, 0.475, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.5, 0.0, 0.0));\n  
query_points.push_back(make_float4(0.0, 0.525, 0.0, 0.0));\n\n  query_points.push_back(make_float4(2.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(2.0, 0.075, 0.0, 0.0));\n  query_points.push_back(make_float4(-2.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(-2.0, 0.075, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 2.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, -2.0, 0.0, 0.0));\n\n  auto results = getTextureAtPointsKernel<TwoDTextureHelper<float4>, float4>(helper, query_points);\n\n  EXPECT_FLOAT_EQ(results.size(), query_points.size());\n  checkEqualTexGPUCPU(results[0], 0.0, helper, query_points[0]);\n  checkEqualTexGPUCPU(results[1], 0.0, helper, query_points[1]);\n\n  checkEqualTexGPUCPU(results[2], 9.0, helper, query_points[2]);\n  checkEqualTexGPUCPU(results[3], 9.0, helper, query_points[3]);\n\n  checkEqualTexGPUCPU(results[4], 4.0, helper, query_points[4]);\n  checkEqualTexGPUCPU(results[5], 4.5, helper, query_points[5]);\n  checkEqualTexGPUCPU(results[6], 5.0, helper, query_points[6]);\n\n  checkEqualTexGPUCPU(results[7], 0.0, helper, query_points[7]);\n  checkEqualTexGPUCPU(results[8], 0.0, helper, query_points[8]);\n  checkEqualTexGPUCPU(results[9], 5.0, helper, query_points[9]);\n  checkEqualTexGPUCPU(results[10], 10.0, helper, query_points[10]);\n\n  checkEqualTexGPUCPU(results[11], 190.0, helper, query_points[11]);\n  checkEqualTexGPUCPU(results[12], 190.0, helper, query_points[12]);\n\n  checkEqualTexGPUCPU(results[13], 90.0, helper, query_points[13]);\n  checkEqualTexGPUCPU(results[14], 95.0, helper, query_points[14]);\n  checkEqualTexGPUCPU(results[15], 100.0, helper, query_points[15]);\n\n  checkEqualTexGPUCPU(results[16], 9.0, helper, query_points[16]);\n  checkEqualTexGPUCPU(results[17], 19.0, helper, query_points[17]);\n  checkEqualTexGPUCPU(results[18], 0.0, helper, query_points[18]);\n  checkEqualTexGPUCPU(results[19], 10.0, helper, query_points[19]);\n  checkEqualTexGPUCPU(results[20], 190.0, 
helper, query_points[20]);\n  checkEqualTexGPUCPU(results[21], 0.0, helper, query_points[21]);\n}\n\nvoid checkEqualMapGPUCPU(float4 computed, float val, TwoDTextureHelper<float4>& helper, float4 query_point)\n{\n  checkEqual(computed, val);\n  checkEqual(helper.queryTextureAtMapPose(query_point.w, make_float3(query_point.x, query_point.y, query_point.z)),\n             val);\n}\n\nTEST_F(TwoDTextureHelperTest, QueryTextureAtMapPose)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(1);\n  helper.GPUSetup();\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  helper.updateTexture(0, data_vec);\n  helper.updateResolution(0, 10);\n  helper.copyToDevice(true);\n\n  std::vector<float4> query_points;\n  query_points.push_back(make_float4(0.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.05, 0.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.95, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(1.0, 0.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.45, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.5, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.55, 0.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.025, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.05, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.075, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.0, 0.975, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 1.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.0, 0.475, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.5, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.525, 0.0, 0.0));\n\n  for (int i = 0; i < query_points.size(); i++)\n  {\n    // resolution * 
normalized handling\n    query_points[i].x = query_points[i].x * 10 * 10;\n    query_points[i].y = query_points[i].y * 10 * 20;\n    query_points[i].z = query_points[i].z * 10;\n  }\n\n  auto results = getTextureAtMapPointsKernel<TwoDTextureHelper<float4>, float4>(helper, query_points);\n\n  EXPECT_FLOAT_EQ(results.size(), query_points.size());\n  checkEqualMapGPUCPU(results[0], 0.0, helper, query_points[0]);\n  checkEqualMapGPUCPU(results[1], 0.0, helper, query_points[1]);\n\n  checkEqualMapGPUCPU(results[2], 9.0, helper, query_points[2]);\n  checkEqualMapGPUCPU(results[3], 9.0, helper, query_points[3]);\n\n  checkEqualMapGPUCPU(results[4], 4.0, helper, query_points[4]);\n  checkEqualMapGPUCPU(results[5], 4.5, helper, query_points[5]);\n  checkEqualMapGPUCPU(results[6], 5.0, helper, query_points[6]);\n\n  checkEqualMapGPUCPU(results[7], 0.0, helper, query_points[7]);\n  checkEqualMapGPUCPU(results[8], 0.0, helper, query_points[8]);\n  checkEqualMapGPUCPU(results[9], 5.0, helper, query_points[9]);\n  checkEqualMapGPUCPU(results[10], 10., helper, query_points[10]);\n\n  checkEqualMapGPUCPU(results[11], 190.0, helper, query_points[11]);\n  checkEqualMapGPUCPU(results[12], 190.0, helper, query_points[12]);\n\n  checkEqualMapGPUCPU(results[13], 90.0, helper, query_points[13]);\n  checkEqualMapGPUCPU(results[14], 95.0, helper, query_points[14]);\n  checkEqualMapGPUCPU(results[15], 100.0, helper, query_points[15]);\n}\n\nvoid checkEqualWorldGPUCPU(float4 computed, float val, TwoDTextureHelper<float4>& helper, float4 query_point)\n{\n  checkEqual(computed, val);\n  checkEqual(helper.queryTextureAtWorldPose(query_point.w, make_float3(query_point.x, query_point.y, query_point.z)),\n             val);\n}\n\nTEST_F(TwoDTextureHelperTest, QueryTextureAtWorldPose)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(1);\n  helper.GPUSetup();\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  
data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper.updateRotation(0, new_rot_mat);\n  helper.updateOrigin(0, make_float3(1, 2, 3));\n\n  helper.updateTexture(0, data_vec);\n  helper.updateResolution(0, 10);\n  helper.copyToDevice(true);\n\n  std::vector<float4> query_points;\n  query_points.push_back(make_float4(0.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.05, 0.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.95, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(1.0, 0.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.45, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.5, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.55, 0.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.0, 0.0, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.025, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.05, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.075, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.0, 0.975, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 1.0, 0.0, 0.0));\n\n  query_points.push_back(make_float4(0.0, 0.475, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.5, 0.0, 0.0));\n  query_points.push_back(make_float4(0.0, 0.525, 0.0, 0.0));\n\n  for (int i = 0; i < query_points.size(); i++)\n  {\n    if (i == 4)\n    {\n      printf(\"correct tex value at %d: %f %f\\n\", i, query_points[i].x, query_points[i].y);\n    }\n    float4 temp = query_points[i];\n    // resolution * normalized handling\n    query_points[i].x = temp.y * 10.0 * 20.0 + 1;\n    query_points[i].y = temp.x * 10.0 * 10.0 + 2;\n    query_points[i].z = temp.z * 10.0 + 3;\n    if (i == 4)\n    {\n      printf(\"starting input at %d: %f 
%f\\n\", i, query_points[i].x, query_points[i].y);\n    }\n  }\n\n  auto results = getTextureAtWorldPointsKernel<TwoDTextureHelper<float4>, float4>(helper, query_points);\n\n  EXPECT_FLOAT_EQ(results.size(), query_points.size());\n  checkEqualWorldGPUCPU(results[0], 0.0, helper, query_points[0]);\n  checkEqualWorldGPUCPU(results[1], 0.0, helper, query_points[1]);\n\n  checkEqualWorldGPUCPU(results[2], 9.0, helper, query_points[2]);\n  checkEqualWorldGPUCPU(results[3], 9.0, helper, query_points[3]);\n\n  checkEqualWorldGPUCPU(results[4], 4.0, helper, query_points[4]);\n  checkEqualWorldGPUCPU(results[5], 4.5, helper, query_points[5]);\n  checkEqualWorldGPUCPU(results[6], 5.0, helper, query_points[6]);\n\n  checkEqualWorldGPUCPU(results[7], 0.0, helper, query_points[7]);\n  checkEqualWorldGPUCPU(results[8], 0.0, helper, query_points[8]);\n  checkEqualWorldGPUCPU(results[9], 5.0, helper, query_points[9]);\n  checkEqualWorldGPUCPU(results[10], 10., helper, query_points[10]);\n\n  checkEqualWorldGPUCPU(results[11], 190.0, helper, query_points[11]);\n  checkEqualWorldGPUCPU(results[12], 190.0, helper, query_points[12]);\n\n  checkEqualWorldGPUCPU(results[13], 90.0, helper, query_points[13]);\n  checkEqualWorldGPUCPU(results[14], 95.0, helper, query_points[14]);\n  checkEqualWorldGPUCPU(results[15], 100.0, helper, query_points[15]);\n}\n\nTEST_F(TwoDTextureHelperTest, GPUCPUTextureLookupMatch)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(1);\n  helper.GPUSetup();\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper.updateRotation(0, new_rot_mat);\n  helper.updateOrigin(0, 
make_float3(1, 2, 3));\n\n  helper.updateTexture(0, data_vec);\n  helper.updateResolution(0, 10);\n  helper.copyToDevice(true);\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  EXPECT_TRUE(textures[0].allocated);\n  EXPECT_FALSE(textures[0].update_data);\n  EXPECT_FALSE(textures[0].update_mem);\n\n  std::vector<float4> query_points;\n  for (int i = 0; i < 500; i++)\n  {\n    query_points.push_back(make_float4(distribution(generator), distribution(generator), distribution(generator), 0.0));\n  }\n\n  auto results = getTextureAtPointsKernel<TwoDTextureHelper<float4>, float4>(helper, query_points);\n\n  EXPECT_FLOAT_EQ(results.size(), query_points.size());\n  for (int i = 0; i < results.size(); i++)\n  {\n    float4 CPU_result =\n        helper.queryTexture(query_points[i].w, make_float3(query_points[i].x, query_points[i].y, query_points[i].z));\n    EXPECT_NEAR(results[i].x, CPU_result.x, 0.05);\n    EXPECT_NEAR(results[i].y, CPU_result.y, 0.05);\n    EXPECT_NEAR(results[i].z, CPU_result.z, 0.05);\n    EXPECT_NEAR(results[i].w, CPU_result.w, 0.05);\n  }\n}\n\nTEST_F(TwoDTextureHelperTest, GPUCPUTextureLookupMatchManyUpdates)\n{\n  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(1);\n  helper.GPUSetup();\n\n  cudaExtent extent = make_cudaExtent(10, 20, 0);\n  helper.setExtent(0, extent);\n\n  std::vector<float4> data_vec;\n  data_vec.resize(10 * 20);\n  for (int i = 0; i < data_vec.size(); i++)\n  {\n    data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n  }\n\n  std::array<float3, 3> new_rot_mat{};\n  new_rot_mat[0] = make_float3(0, 1, 0);\n  new_rot_mat[1] = make_float3(1, 0, 0);\n  new_rot_mat[2] = make_float3(0, 0, 1);\n  helper.updateRotation(0, new_rot_mat);\n  helper.updateOrigin(0, make_float3(1, 2, 3));\n  helper.updateTexture(0, data_vec);\n\n  helper.updateResolution(0, 10);\n  helper.copyToDevice(true);\n\n  std::vector<TextureParams<float4>> textures = helper.getTextures();\n  EXPECT_TRUE(textures[0].allocated);\n  
EXPECT_FALSE(textures[0].update_data);\n  EXPECT_FALSE(textures[0].update_mem);\n\n  std::vector<float4> query_points;\n  for (int i = 0; i < 500; i++)\n  {\n    query_points.push_back(make_float4(distribution(generator), distribution(generator), distribution(generator), 0.0));\n  }\n\n  for (int iterations = 0; iterations < 10; iterations++)\n  {\n    for (int i = 0; i < data_vec.size(); i++)\n    {\n      data_vec[i] = make_float4(i, i + 1, i + 2, i + 3);\n      data_vec[i] *= iterations;\n      helper.updateTexture(0, data_vec);\n      helper.copyToDevice(true);\n    }\n\n    auto results = getTextureAtPointsKernel<TwoDTextureHelper<float4>, float4>(helper, query_points);\n\n    EXPECT_FLOAT_EQ(results.size(), query_points.size());\n    for (int i = 0; i < results.size(); i++)\n    {\n      float4 CPU_result =\n          helper.queryTexture(query_points[i].w, make_float3(query_points[i].x, query_points[i].y, query_points[i].z));\n      EXPECT_NEAR(results[i].x, CPU_result.x, 0.05);\n      EXPECT_NEAR(results[i].y, CPU_result.y, 0.05);\n      EXPECT_NEAR(results[i].z, CPU_result.z, 0.05);\n      EXPECT_NEAR(results[i].w, CPU_result.w, 0.05);\n    }\n  }\n}\n\n// TEST_F(TwoDTextureHelperTest, CopyToDeviceTest)\n//{\n//  int number = 8;\n//  TwoDTextureHelper<float4> helper = TwoDTextureHelper<float4>(number);\n//\n//  //helper.setTestFlagsInParam(0, false, false, false);\n//  //helper.setTestFlagsInParam(1, false, false, true);\n//  //helper.setTestFlagsInParam(2, false, true, false);\n//  //helper.setTestFlagsInParam(3, false, true, true);\n//  //helper.setTestFlagsInParam(4, true, false, false);\n//  //helper.setTestFlagsInParam(5, true, false, true);\n//  //helper.setTestFlagsInParam(6, true, true, false);\n//  //helper.setTestFlagsInParam(7, true, true, true);\n//\n//  // nothing should have happened since GPUMemStatus=False\n//  helper.GPUSetup();\n//  helper.copyToDevice();\n//  std::vector<TextureParams<float4>> textures = helper.getTextures();\n//  
textures = helper.getTextures();\n//\n//  EXPECT_FALSE(textures[0].update_mem);\n//  EXPECT_FALSE(textures[0].update_data);\n//  EXPECT_FALSE(textures[0].allocated);\n//\n//  EXPECT_FALSE(textures[1].update_mem);\n//  EXPECT_FALSE(textures[1].update_data);\n//  EXPECT_TRUE(textures[1].allocated);\n//\n//  EXPECT_FALSE(textures[2].update_mem);\n//  EXPECT_TRUE(textures[2].update_data);\n//  EXPECT_FALSE(textures[2].allocated);\n//\n//  EXPECT_FALSE(textures[3].update_mem);\n//  EXPECT_FALSE(textures[3].update_data);  // was TRUE\n//  EXPECT_TRUE(textures[3].allocated);\n//\n//  EXPECT_FALSE(textures[4].update_mem);  // was TRUE\n//  EXPECT_FALSE(textures[4].update_data);\n//  EXPECT_TRUE(textures[4].allocated);  // was FALSE\n//\n//  EXPECT_FALSE(textures[5].update_mem);  // was TRUE\n//  EXPECT_FALSE(textures[5].update_data);\n//  EXPECT_TRUE(textures[5].allocated);\n//\n//  EXPECT_FALSE(textures[6].update_mem);   // was TRUE\n//  EXPECT_FALSE(textures[6].update_data);  // was TRUE\n//  EXPECT_TRUE(textures[6].allocated);\n//\n//  EXPECT_FALSE(textures[7].update_mem);   // was TRUE\n//  EXPECT_FALSE(textures[7].update_data);  // was TRUE\n//  EXPECT_TRUE(textures[7].allocated);\n//\n//  //EXPECT_EQ(helper.copyDataToGPUCalled, 3);\n//  //EXPECT_EQ(helper.allocateCudaTextureCalled, 4);\n//  //EXPECT_EQ(helper.createCudaTextureCalled, 4);\n//}\n"
  }
]